Repository: GAIR-NLP/ToRL
Branch: main
Commit: 1db091d9cbd3
Files: 221
Total size: 1.6 MB
Directory structure:
gitextract_g7clz9ov/
├── data/
│ └── torl_data/
│ ├── test.parquet
│ ├── test_0524.parquet
│ └── train.parquet
├── readme.md
├── requirements.txt
├── scripts/
│ └── torl_1.5b.sh
└── verl/
├── __init__.py
├── models/
│ ├── README.md
│ ├── __init__.py
│ ├── llama/
│ │ ├── __init__.py
│ │ └── megatron/
│ │ ├── __init__.py
│ │ ├── checkpoint_utils/
│ │ │ ├── __init__.py
│ │ │ ├── llama_loader.py
│ │ │ └── llama_saver.py
│ │ ├── layers/
│ │ │ ├── __init__.py
│ │ │ ├── parallel_attention.py
│ │ │ ├── parallel_decoder.py
│ │ │ ├── parallel_linear.py
│ │ │ ├── parallel_mlp.py
│ │ │ └── parallel_rmsnorm.py
│ │ └── modeling_llama_megatron.py
│ ├── qwen2/
│ │ ├── __init__.py
│ │ └── megatron/
│ │ ├── __init__.py
│ │ ├── checkpoint_utils/
│ │ │ ├── __init__.py
│ │ │ ├── qwen2_loader.py
│ │ │ └── qwen2_saver.py
│ │ ├── layers/
│ │ │ ├── __init__.py
│ │ │ ├── parallel_attention.py
│ │ │ ├── parallel_decoder.py
│ │ │ ├── parallel_linear.py
│ │ │ ├── parallel_mlp.py
│ │ │ └── parallel_rmsnorm.py
│ │ └── modeling_qwen2_megatron.py
│ ├── registry.py
│ ├── transformers/
│ │ ├── __init__.py
│ │ ├── llama.py
│ │ ├── monkey_patch.py
│ │ ├── qwen2.py
│ │ └── qwen2_vl.py
│ └── weight_loader_registry.py
├── protocol.py
├── single_controller/
│ ├── __init__.py
│ ├── base/
│ │ ├── __init__.py
│ │ ├── decorator.py
│ │ ├── megatron/
│ │ │ ├── __init__.py
│ │ │ ├── worker.py
│ │ │ └── worker_group.py
│ │ ├── register_center/
│ │ │ ├── __init__.py
│ │ │ └── ray.py
│ │ ├── worker.py
│ │ └── worker_group.py
│ └── ray/
│ ├── __init__.py
│ ├── base.py
│ └── megatron.py
├── third_party/
│ ├── __init__.py
│ └── vllm/
│ ├── __init__.py
│ ├── vllm_spmd/
│ │ ├── __init__.py
│ │ └── dtensor_weight_loaders.py
│ ├── vllm_v_0_3_1/
│ │ ├── __init__.py
│ │ ├── arg_utils.py
│ │ ├── config.py
│ │ ├── llm.py
│ │ ├── llm_engine_sp.py
│ │ ├── model_loader.py
│ │ ├── model_runner.py
│ │ ├── parallel_state.py
│ │ ├── tokenizer.py
│ │ ├── weight_loaders.py
│ │ └── worker.py
│ ├── vllm_v_0_4_2/
│ │ ├── __init__.py
│ │ ├── arg_utils.py
│ │ ├── config.py
│ │ ├── dtensor_weight_loaders.py
│ │ ├── hf_weight_loader.py
│ │ ├── llm.py
│ │ ├── llm_engine_sp.py
│ │ ├── megatron_weight_loaders.py
│ │ ├── model_loader.py
│ │ ├── model_runner.py
│ │ ├── parallel_state.py
│ │ ├── spmd_gpu_executor.py
│ │ ├── tokenizer.py
│ │ └── worker.py
│ ├── vllm_v_0_5_4/
│ │ ├── __init__.py
│ │ ├── arg_utils.py
│ │ ├── config.py
│ │ ├── dtensor_weight_loaders.py
│ │ ├── hf_weight_loader.py
│ │ ├── llm.py
│ │ ├── llm_engine_sp.py
│ │ ├── megatron_weight_loaders.py
│ │ ├── model_loader.py
│ │ ├── model_runner.py
│ │ ├── parallel_state.py
│ │ ├── spmd_gpu_executor.py
│ │ ├── tokenizer.py
│ │ └── worker.py
│ └── vllm_v_0_6_3/
│ ├── __init__.py
│ ├── arg_utils.py
│ ├── config.py
│ ├── dtensor_weight_loaders.py
│ ├── hf_weight_loader.py
│ ├── llm.py
│ ├── llm_engine_sp.py
│ ├── megatron_weight_loaders.py
│ ├── model_loader.py
│ ├── model_runner.py
│ ├── parallel_state.py
│ ├── spmd_gpu_executor.py
│ ├── tokenizer.py
│ └── worker.py
├── trainer/
│ ├── __init__.py
│ ├── config/
│ │ ├── evaluation.yaml
│ │ ├── generation.yaml
│ │ ├── ppo_megatron_trainer.yaml
│ │ ├── ppo_trainer.yaml
│ │ └── sft_trainer.yaml
│ ├── fsdp_sft_trainer.py
│ ├── main_eval.py
│ ├── main_generation.py
│ ├── main_ppo.py
│ ├── ppo/
│ │ ├── __init__.py
│ │ ├── core_algos.py
│ │ └── ray_trainer.py
│ └── runtime_env.yaml
├── utils/
│ ├── __init__.py
│ ├── checkpoint/
│ │ ├── __init__.py
│ │ ├── checkpoint_manager.py
│ │ └── fsdp_checkpoint_manager.py
│ ├── config.py
│ ├── dataset/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── rl_dataset.py
│ │ ├── rm_dataset.py
│ │ └── sft_dataset.py
│ ├── debug/
│ │ ├── __init__.py
│ │ ├── performance.py
│ │ └── trajectory_tracker.py
│ ├── distributed.py
│ ├── flops_counter.py
│ ├── fs.py
│ ├── fsdp_utils.py
│ ├── hdfs_io.py
│ ├── import_utils.py
│ ├── logger/
│ │ ├── __init__.py
│ │ └── aggregate_logger.py
│ ├── logging_utils.py
│ ├── megatron/
│ │ ├── __init__.py
│ │ ├── memory.py
│ │ ├── optimizer.py
│ │ ├── pipeline_parallel.py
│ │ ├── sequence_parallel.py
│ │ └── tensor_parallel.py
│ ├── megatron_utils.py
│ ├── memory_buffer.py
│ ├── model.py
│ ├── py_functional.py
│ ├── ray_utils.py
│ ├── rendezvous/
│ │ ├── __init__.py
│ │ └── ray_backend.py
│ ├── reward_score/
│ │ ├── __init__.py
│ │ ├── eval.py
│ │ ├── geo3k.py
│ │ ├── gsm8k.py
│ │ ├── math.py
│ │ ├── math_verifier.py
│ │ ├── prime_code/
│ │ │ ├── __init__.py
│ │ │ ├── testing_util.py
│ │ │ └── utils.py
│ │ └── prime_math/
│ │ ├── __init__.py
│ │ ├── grader.py
│ │ └── math_normalize.py
│ ├── seqlen_balancing.py
│ ├── tokenizer.py
│ ├── torch_dtypes.py
│ ├── torch_functional.py
│ ├── tracking.py
│ └── ulysses.py
├── version/
│ └── version
└── workers/
├── __init__.py
├── actor/
│ ├── __init__.py
│ ├── base.py
│ ├── dp_actor.py
│ └── megatron_actor.py
├── critic/
│ ├── __init__.py
│ ├── base.py
│ ├── dp_critic.py
│ └── megatron_critic.py
├── fsdp_workers.py
├── megatron_workers.py
├── reward_manager/
│ ├── __init__.py
│ ├── naive.py
│ └── prime.py
├── reward_model/
│ ├── __init__.py
│ ├── base.py
│ └── megatron/
│ ├── __init__.py
│ └── reward_model.py
├── rollout/
│ ├── __init__.py
│ ├── base.py
│ ├── hf_rollout.py
│ ├── naive/
│ │ ├── __init__.py
│ │ └── naive_rollout.py
│ ├── tokenizer.py
│ └── vllm_rollout/
│ ├── __init__.py
│ ├── fire_vllm_rollout.py
│ ├── qwen_agent/
│ │ ├── code/
│ │ │ ├── code_interpreter.py
│ │ │ └── utils/
│ │ │ └── code_utils.py
│ │ ├── llm/
│ │ │ └── schema.py
│ │ ├── log.py
│ │ ├── settings.py
│ │ ├── tools/
│ │ │ ├── base.py
│ │ │ ├── code_interpreter.py
│ │ │ └── python_executor.py
│ │ └── utils/
│ │ └── utils.py
│ ├── vllm_rollout.py
│ └── vllm_rollout_spmd.py
└── sharding_manager/
├── __init__.py
├── base.py
├── fsdp_ulysses.py
├── fsdp_vllm.py
└── megatron_vllm.py
================================================
FILE CONTENTS
================================================
================================================
FILE: readme.md
================================================
# ToRL: Scaling Tool-Integrated RL
[📄 Paper](https://arxiv.org/abs/2503.23383) | [🌐 Dataset](https://github.com/GAIR-NLP/ToRL/tree/main/data/torl_data) | [📘 Model](https://huggingface.co/GAIR/ToRL)
> Performance comparison of ToRL versus baseline models (16-step moving average). Both plots show AIME24 accuracy (%) against training steps for the 1.5B and 7B models. In both cases, **ToRL (Ours)** significantly outperforms both the tool-free baseline and Qwen-2.5-Math-Instruct-TIR, achieving up to **12% (1.5B)** and **14% (7B)** higher accuracy.
> **Emergent cognitive behavior** during training. ToRL first cross-validates the tool's output with reasoning results. Upon detecting inconsistencies, it engages in reflection and further verification through tool calls.
## Releases
[2025/03/28] We're releasing the following components:
- 🚀 **Training**: Complete implementation of our training pipeline
- 🔥 **[ToRL Dataset](https://github.com/GAIR-NLP/ToRL/tree/main/data/torl_data)**: Our curated dataset of 28k mathematical questions
- 🤖 **[ToRL Model](https://huggingface.co/GAIR/ToRL)**: Models trained with ToRL.
## Overview
This repository presents **ToRL (Tool-Integrated Reinforcement Learning)**, a framework that challenges traditional approaches to tool integration in language models by enabling LLMs to autonomously discover and refine tool usage strategies through reinforcement learning. Unlike prior methods constrained by supervised fine-tuning or predefined tool patterns, ToRL demonstrates that **exploration-driven learning** with computational tools can unlock emergent cognitive behaviors and achieve state-of-the-art performance on complex reasoning tasks. Notably, our approach operates directly from base models without imitation learning, achieving **43.3% accuracy on AIME2024** with a 7B model—matching the performance of larger 32B models trained with RL.
### Key Findings
- **Autonomous Tool Integration**: Models learn **when and how** to invoke tools (e.g., code interpreters) through RL-driven exploration, eliminating dependency on human-curated tool usage patterns.
- **Emergent Cognitive Abilities**:
- Self-correction by cross-validating code execution results with reasoning steps
- Adaptive strategy selection between tool-based and pure-reasoning approaches
- Self-regulation of ineffective tool calls without explicit supervision
### ToRL Performance
1.5B Model Performance across challenging mathematical benchmarks:
| Model | SFT/RL | Tool | AIME24 | AIME25 | MATH500 | Olympiad | AMC23 | Avg |
|----------------------------------|--------|------|--------|--------|---------|----------|-------|-------|
| Qwen2.5-Math-1.5B-Instruct | RL | ❌ | 10.0 | 10.0 | 66.0 | 31.0 | 62.5 | 35.9 |
| Qwen2.5-Math-1.5B-Instruct-TIR | RL | ✅ | 13.3 | 13.3 | 73.8 | 41.3 | 55.0 | 41.3 |
| **ToRL-1.5B(Ours)** | RL | ✅ | **26.7 (+13.3)** | **26.7 (+13.3)** | **77.8 (+3.0)** | **44.0 (+2.7)** | **67.5 (+5.0)** | **48.5 (+7.2)** |
7B Model Performance across challenging mathematical benchmarks:
| Model | SFT/RL | Tool | AIME24 | AIME25 | MATH500 | Olympiad | AMC23 | Avg |
|----------------------------------|--------|------|--------|--------|---------|----------|-------|-------|
| Qwen2.5-Math-7B-Instruct | RL | ❌ | 10.0 | 16.7 | 74.8 | 32.4 | 65.0 | 39.8 |
| Qwen2.5-Math-7B-Instruct-TIR | RL | ✅ | 26.7 | 16.7 | 78.8 | 45.0 | 70.0 | 47.4 |
| SimpleRL-Zero | RL | ❌ | 33.3 | 6.7 | 77.2 | 37.6 | 62.5 | 43.5 |
| rStar-Math-7B | SFT | ❌ | 26.7 | - | 78.4 | 47.1 | 47.5 | - |
| Eurus-2-7B-PRIME | RL | ❌ | 26.7 | 13.3 | 79.2 | 42.1 | 57.4 | 43.1 |
| **ToRL-7B(Ours)** | RL | ✅ | **43.3 (+10.0)** | **30.0 (+13.3)** | **82.2 (+3.0)** | **49.9 (+2.8)** | **75.0 (+5.0)** | **62.1 (+14.7)** |
### Cognitive Behavior via RL Scaling
In the left figure, the code initially generated by the model encounters an execution error; the model then corrects it, and the revised code executes successfully.
In the right figure, the model first derives an incorrect result through natural-language reasoning, then discovers the error during code verification and corrects it.
## Quick Start
### Preparing the Sandbox Environment
Install and launch SandboxFusion following the instructions at [https://github.com/bytedance/SandboxFusion](https://github.com/bytedance/SandboxFusion).
```
# The following command can be used to install the sandbox environment
# to avoid dependency conflicts.
# The sandbox environment must be named "sandbox-runtime".
conda create -n sandbox-runtime python==3.11
conda activate sandbox-runtime
pip install -r runtime/python/requirement.txt
pip install poetry
poetry install
mkdir -p docs/build
make run-online
```
Replace the `sandbox_url` on line `109` of `verl/workers/rollout/vllm_rollout/vllm_rollout_spmd.py` with the URL of your sandbox.
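Once the sandbox is running, it is worth sanity-checking that the URL is reachable before starting training. The snippet below is a minimal sketch, assuming SandboxFusion's default `/run_code` endpoint on port 8080; adjust the host and port to your deployment:

```python
# Minimal reachability check for a running SandboxFusion instance.
# The endpoint path and port are assumptions based on SandboxFusion's defaults.
import requests

sandbox_url = "http://localhost:8080/run_code"
resp = requests.post(sandbox_url, json={"code": "print(1 + 1)", "language": "python"})
resp.raise_for_status()
print(resp.json())  # the run result should report stdout "2"
```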
### Environment setup
```
pip install -r requirements.txt
pip install wandb jsonlines math-verify hydra-core==1.4.0.dev1 sortedcontainers qwen-agent[code_interpreter] qwen-agent[python_executor]
```
### Training
Execute `bash scripts/torl_1.5b.sh` to run ToRL.
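The script forwards any trailing arguments to Hydra via `$@`, so individual config fields can be overridden from the command line without editing the file. For example (illustrative values):

```bash
# Short smoke-test run; overrides are appended after the script name.
bash scripts/torl_1.5b.sh trainer.total_epochs=1 trainer.test_freq=1
```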
## Acknowledgements
Our work builds upon the insightful technical reports from the [DeepSeek R1](https://github.com/deepseek-ai/DeepSeek-R1) and [Kimi-k1.5](https://github.com/MoonshotAI/Kimi-k1.5) teams. We extend our appreciation to the [Qwen-Math](https://github.com/QwenLM/Qwen2.5-Math) team for their open-source model; to the creators of [VeRL](https://github.com/volcengine/verl) and [vLLM](https://github.com/vllm-project/vllm) for providing the reinforcement learning framework and inference infrastructure, respectively, that enabled this research; and to the [Qwen-Agent](https://github.com/QwenLM/Qwen-Agent) and [Sandbox Fusion](https://github.com/bytedance/SandboxFusion) teams, which provided the necessary tools for our research.
## Citation
If you find this work useful, please cite our paper:
```bibtex
@misc{li2025torlscalingtoolintegratedrl,
title={ToRL: Scaling Tool-Integrated RL},
author={Xuefeng Li and Haoyang Zou and Pengfei Liu},
year={2025},
eprint={2503.23383},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2503.23383},
}
```
================================================
FILE: requirements.txt
================================================
accelerate==1.5.2
aiohappyeyeballs==2.6.1
aiohttp==3.11.14
aiohttp-cors==0.8.0
aiosignal==1.3.2
airportsdata==20250224
annotated-types==0.7.0
antlr4-python3-runtime==4.9.3
anyio==4.9.0
astor==0.8.1
async-timeout==5.0.1
attrs==25.3.0
blake3==1.0.4
cachetools==5.5.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
cloudpickle==3.1.1
codetiming==1.4.0
colorful==0.5.6
compressed-tensors==0.9.2
cupy-cuda12x==13.4.1
datasets==3.4.1
depyf==0.18.0
dill==0.3.8
diskcache==5.6.3
distlib==0.3.9
distro==1.9.0
dnspython==2.7.0
docker-pycreds==0.4.0
einops==0.8.1
email_validator==2.2.0
exceptiongroup==1.2.2
fastapi==0.115.11
fastapi-cli==0.0.7
fastrlock==0.8.3
filelock==3.18.0
flash_attn==2.7.4.post1
frozenlist==1.5.0
fsspec==2024.12.0
gguf==0.10.0
gitdb==4.0.12
GitPython==3.1.44
google-api-core==2.24.2
google-auth==2.38.0
googleapis-common-protos==1.69.2
grpcio==1.71.0
h11==0.14.0
httpcore==1.0.7
httptools==0.6.4
httpx==0.28.1
huggingface-hub==0.29.3
hydra-core==1.3.2
idna==3.10
importlib_metadata==8.6.1
interegular==0.3.3
Jinja2==3.1.6
jiter==0.9.0
jsonschema==4.23.0
jsonschema-specifications==2024.10.1
lark==1.2.2
liger_kernel==0.5.5
llvmlite==0.43.0
lm-format-enforcer==0.10.11
markdown-it-py==3.0.0
MarkupSafe==3.0.2
mdurl==0.1.2
mistral_common==1.5.4
mpmath==1.3.0
msgpack==1.1.0
msgspec==0.19.0
multidict==6.2.0
multiprocess==0.70.16
nest-asyncio==1.6.0
networkx==3.4.2
ninja==1.11.1.4
numba==0.60.0
numpy==1.26.4
nvidia-cublas-cu12==12.4.5.8
nvidia-cuda-cupti-cu12==12.4.127
nvidia-cuda-nvrtc-cu12==12.4.127
nvidia-cuda-runtime-cu12==12.4.127
nvidia-cudnn-cu12==9.1.0.70
nvidia-cufft-cu12==11.2.1.3
nvidia-curand-cu12==10.3.5.147
nvidia-cusolver-cu12==11.6.1.9
nvidia-cusparse-cu12==12.3.1.170
nvidia-cusparselt-cu12==0.6.2
nvidia-nccl-cu12==2.21.5
nvidia-nvjitlink-cu12==12.4.127
nvidia-nvtx-cu12==12.4.127
omegaconf==2.3.0
openai==1.68.2
opencensus==0.11.4
opencensus-context==0.1.3
opencv-python-headless==4.11.0.86
orjson==3.10.15
outlines==0.1.11
outlines_core==0.1.26
packaging==24.2
pandas==2.2.3
partial-json-parser==0.2.1.1.post5
peft==0.15.0
pillow==11.1.0
platformdirs==4.3.7
prometheus-fastapi-instrumentator==7.1.0
prometheus_client==0.21.1
propcache==0.3.0
proto-plus==1.26.1
protobuf==5.29.4
psutil==7.0.0
py-cpuinfo==9.0.0
py-spy==0.4.0
pyarrow==19.0.1
pyasn1==0.6.1
pyasn1_modules==0.4.1
pybind11==2.13.6
pycountry==24.6.1
pydantic==2.10.6
pydantic_core==2.27.2
Pygments==2.19.1
pylatexenc==2.10
python-dateutil==2.9.0.post0
python-dotenv==1.0.1
python-json-logger==3.3.0
python-multipart==0.0.20
pytz==2025.1
PyYAML==6.0.2
pyzmq==26.3.0
ray==2.44.0
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
rich==13.9.4
rich-toolkit==0.13.2
rpds-py==0.23.1
rsa==4.9
safetensors==0.5.3
scipy==1.15.2
sentencepiece==0.2.0
sentry-sdk==2.24.0
setproctitle==1.3.5
shellingham==1.5.4
six==1.17.0
smart-open==7.1.0
smmap==5.0.2
sniffio==1.3.1
starlette==0.46.1
sympy==1.13.1
tensordict==0.6.0
tiktoken==0.9.0
tokenizers==0.21.1
torch==2.6.0
torchaudio==2.6.0
torchdata==0.11.0
torchvision==0.21.0
tqdm==4.67.1
transformers==4.50.0
triton==3.2.0
typer==0.15.2
typing_extensions==4.12.2
tzdata==2025.1
urllib3==2.3.0
uvicorn==0.34.0
uvloop==0.21.0
virtualenv==20.29.3
vllm==0.8.1
wandb==0.19.8
watchfiles==1.0.4
websockets==15.0.1
wrapt==1.17.2
xformers==0.0.29.post2
xgrammar==0.1.16
xxhash==3.5.0
yarl==1.18.3
zipp==3.21.0
================================================
FILE: scripts/torl_1.5b.sh
================================================
policy_path=Qwen/Qwen2.5-Math-1.5B
rollout_batch_size=128
n_samples_per_prompts=16
episode=300
temperature=1.0
batch_size=1024
lr=1e-6
kl_loss_coef=0.0
kl_coef=0.0
entropy_coeff=0
max_gen_length=3072
numcall=1
dataset_name=torl_data
run_name=rl.grpo_qwen.math.1.5b_${dataset_name}_numcall${numcall}
data_path=../data/${dataset_name}
samples_save_path=../data/samples/$run_name
python3 -m verl.trainer.main_ppo \
algorithm.adv_estimator=grpo \
data.train_files=$data_path/train.parquet \
data.val_files=$data_path/test.parquet \
data.train_batch_size=$rollout_batch_size \
data.val_batch_size=2048 \
data.max_prompt_length=400 \
data.max_response_length=$max_gen_length \
data.template_type=tir_base_0309 \
actor_rollout_ref.model.path=$policy_path \
actor_rollout_ref.actor.optim.lr=$lr \
actor_rollout_ref.model.use_remove_padding=True \
actor_rollout_ref.actor.ppo_mini_batch_size=$batch_size \
actor_rollout_ref.actor.use_dynamic_bsz=True \
actor_rollout_ref.actor.use_kl_loss=True \
actor_rollout_ref.actor.kl_loss_coef=$kl_loss_coef \
actor_rollout_ref.actor.kl_loss_type=low_var_kl \
actor_rollout_ref.actor.entropy_coeff=$entropy_coeff \
actor_rollout_ref.model.enable_gradient_checkpointing=True \
actor_rollout_ref.actor.fsdp_config.param_offload=True \
actor_rollout_ref.actor.fsdp_config.optimizer_offload=True \
actor_rollout_ref.rollout.tensor_model_parallel_size=1 \
actor_rollout_ref.rollout.name=vllm \
actor_rollout_ref.rollout.num_llm_calls_available=$numcall \
actor_rollout_ref.rollout.temperature=$temperature \
actor_rollout_ref.rollout.top_p=1.0 \
actor_rollout_ref.rollout.gpu_memory_utilization=0.85 \
actor_rollout_ref.rollout.n=$n_samples_per_prompts \
actor_rollout_ref.rollout.log_prob_use_dynamic_bsz=True \
actor_rollout_ref.rollout.max_num_seqs=1024 \
actor_rollout_ref.ref.log_prob_use_dynamic_bsz=True \
actor_rollout_ref.ref.fsdp_config.param_offload=True \
algorithm.kl_ctrl.kl_coef=$kl_coef \
trainer.logger=['console','wandb'] \
trainer.project_name=torl \
trainer.experiment_name=${run_name} \
trainer.n_gpus_per_node=8 \
trainer.nnodes=1 \
trainer.save_freq=20 \
trainer.test_freq=4 \
trainer.default_local_dir=path_to_save/${run_name} \
trainer.resume_mode=auto \
trainer.samples_save_path=$samples_save_path \
trainer.total_epochs=$episode $@
================================================
FILE: verl/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
version_folder = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(version_folder, 'version/version')) as f:
__version__ = f.read().strip()
from .protocol import DataProto
from .utils.logging_utils import set_basic_config
import logging
set_basic_config(level=logging.WARNING)
from . import single_controller
__all__ = ['DataProto', "__version__"]
if os.getenv('VERL_USE_MODELSCOPE', 'False').lower() == 'true':
import importlib
if importlib.util.find_spec("modelscope") is None:
raise ImportError(f'You are using the modelscope hub, please install modelscope by `pip install modelscope -U`')
# Patch hub to download models from modelscope to speed up.
from modelscope.utils.hf_util import patch_hub
patch_hub()
================================================
FILE: verl/models/README.md
================================================
# Models
Common model zoos such as huggingface/transformers struggle with PyTorch-native model parallelism. Following the design principle of vLLM, we keep the model implementations in verl simple, parallelizable, and highly optimized, with packed inputs.
## Adding a New Huggingface Model
### Step 1: Copy the model file from HF to verl
- Add a new file under verl/models/hf
- Copy ONLY the model file from huggingface/transformers/models to verl/models/hf
### Step 2: Modify the model file to use packed inputs
- Remove all the code related to inference (kv cache)
- Modify the inputs to include only
- input_ids (total_nnz,)
- cu_seqlens (total_nnz + 1,)
- max_seqlen_in_batch: int
- Note that this requires using flash attention with a causal mask (see the sketch below).
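As a concrete illustration, the packed inputs can be derived from a padded batch as follows (a minimal sketch, not verl's actual unpadding code):

```python
# Sketch: converting a padded batch into packed (rmpad) inputs.
import torch

attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])        # two sequences, lengths 3 and 2
padded_input_ids = torch.tensor([[5, 6, 7, 0],
                                 [8, 9, 0, 0]])

seqlens = attention_mask.sum(dim=-1)                  # tensor([3, 2])
input_ids = padded_input_ids[attention_mask.bool()]   # shape (total_nnz,) == (5,)
cu_seqlens = torch.nn.functional.pad(
    torch.cumsum(seqlens, dim=0), (1, 0)).to(torch.int32)  # tensor([0, 3, 5])
max_seqlen_in_batch = int(seqlens.max())              # 3
```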
### Step 2.5: Add tests
- Add a test to compare this version and the huggingface version
- Following the infrastructure and add tests to tests/models/hf
### Step 3: Add a function to apply tensor parallelism
- Please follow
- https://pytorch.org/docs/stable/distributed.tensor.parallel.html
- https://pytorch.org/tutorials/intermediate/TP_tutorial.html
- General comments
- Tensor Parallelism in native PyTorch is NOT auto-parallelism. It works by specifying, via configs, how model parameters and inputs/outputs are resharded; these configs are registered as hooks that perform the input/output resharding before/after the model forward (see the sketch below).
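A minimal sketch of such a function, assuming Llama-style module names (`model.model.layers`, `mlp.gate_proj`, etc.) and following the tutorials linked above:

```python
# Sketch: registering a PyTorch-native tensor-parallel plan for the MLP blocks.
# Module paths are assumptions; adapt them to the model being parallelized.
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import (ColwiseParallel, RowwiseParallel,
                                               parallelize_module)

def apply_tensor_parallel(model, tp_size: int):
    tp_mesh = init_device_mesh("cuda", (tp_size,))
    for layer in model.model.layers:
        plan = {
            "mlp.gate_proj": ColwiseParallel(),  # shard the output dimension
            "mlp.up_proj": ColwiseParallel(),
            "mlp.down_proj": RowwiseParallel(),  # shard the input dim, all-reduce the output
        }
        parallelize_module(layer, tp_mesh, plan)
    return model
```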
### Step 4: Add a function to apply data parallelism
- Please use FSDP2 APIs
- See demo here https://github.com/pytorch/torchtitan/blob/main/torchtitan/parallelisms/parallelize_llama.py#L413
### Step 5: Add a function to apply pipeline parallelism
- Comes in Pytorch 2.4
- Currently only in alpha in nightly version
- Check torchtitan for more details
================================================
FILE: verl/models/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/llama/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/llama/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .modeling_llama_megatron import (
# original model with megatron
ParallelLlamaModel,
ParallelLlamaForCausalLM,
# rmpad with megatron
ParallelLlamaForCausalLMRmPad,
ParallelLlamaForValueRmPad,
# rmpad with megatron and pipeline parallelism
ParallelLlamaForCausalLMRmPadPP,
ParallelLlamaForValueRmPadPP)
================================================
FILE: verl/models/llama/megatron/checkpoint_utils/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/llama/megatron/checkpoint_utils/llama_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from packaging.version import Version
import torch
import time
from typing import Dict, Any, Callable, Optional
import torch.distributed as dist
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
import megatron
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) +
pp_rank_idx * num_layers_per_model)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
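# Worked example (illustrative): with config.num_hidden_layers == 8, pp_size == 2
# and virtual_pp_size == 2, each model chunk holds 2 layers, and the map begins
#   layer 0 -> (pp_rank 0, vpp 0, local 0)    layer 2 -> (pp_rank 1, vpp 0, local 0)
#   layer 4 -> (pp_rank 0, vpp 1, local 0)    layer 6 -> (pp_rank 1, vpp 1, local 0)
# i.e. virtual pipeline ranks interleave contiguous blocks of layers across stages.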
def load_state_dict_to_megatron_llama(state_dict, wrapped_models, config, params_dtype, is_value_model=False):
"""Load merged state_dict to sharded Megatron module in training.
"""
import megatron
from megatron.core import mpu
from megatron.training.utils import print_rank_0, unwrap_model
from megatron.core.transformer.module import Float16Module
from megatron.core import DistributedDataParallel as LocalDDP
from torch.nn.parallel import DistributedDataParallel as torchDDP
start_time = time.time()
def _get_gpt_model(model):
return model
def broadcast_params(module):
for param in module.parameters():
torch.distributed.broadcast(param.data,
src=mpu.get_data_parallel_src_rank(),
group=mpu.get_data_parallel_group())
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, (list, tuple)):
wrapped_models = list(wrapped_models)
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.model.layers) == num_layers_per_model
def _broadcast_tensor(tensor, name) -> torch.Tensor:
"""broadcast tensor from rank0 across mp_group"""
nonlocal state_dict
nonlocal mp_group
if torch.distributed.get_rank() == 0:
if name in state_dict:
weight = state_dict[name]
tensor_shape = weight.shape
else:
tensor_shape = None
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not in state_dict, skip load")
return
if tensor is None:
tensor = torch.empty(
tensor_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
if torch.distributed.get_rank() == 0:
tensor.data.copy_(weight)
dist.broadcast(tensor, src=0, group=mp_group)
def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(config.intermediate_size * 2,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp:(i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp:(i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i:intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0))
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (
tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank() == 0:} tensor {gate_name, up_name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
assert (q_name in state_dict and k_name in state_dict and v_name in state_dict)
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
new_weight_qkv = torch.empty(total_size * tp_size,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp:(i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp:(i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp:(i + 1) * kv_size_tp]
new_weight_qkv[i * total_size:(i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part],
dim=0))
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
new_weight_qkv = torch.empty(total_size * tp_size,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp:(i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv[i * total_size:(i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part],
dim=0))
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
embed_tokens_weight = None
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
_broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"loading layer #{layer}...")
layer_name = f"model.layers.{layer}"
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[dst_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_broadcast_tp_shard_tensor_gate_up(sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight")
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
)
print_rank_0("loading lm_head...")
lm_head_weight = None
if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.lm_head.weight
if is_value_model:
# if torch.distributed.get_rank() == 0:
if 'lm_head.weight' in state_dict and state_dict['lm_head.weight'].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "lm_head.weight")
elif 'reward_head.weight' in state_dict and state_dict['reward_head.weight'].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "reward_head.weight")
print_rank_0('load lm_head from value_head weight')
else:
_broadcast_tensor(None, "lm_head.weight")
print_rank_0('fail to match lm_head in value_model')
# else:
# _broadcast_tensor(lm_head_weight, "lm_head.weight")
else:
_broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
# Broadcast weights inside data parallel groups
for wrapped_model in wrapped_models:
broadcast_params(wrapped_model)
torch.cuda.empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
================================================
FILE: verl/models/llama/megatron/checkpoint_utils/llama_saver.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from packaging.version import Version
from torch.nn.parallel import DistributedDataParallel as torchDDP
import torch
import time
from typing import Optional
import torch.distributed as dist
import megatron
from megatron import get_args
from megatron.core import mpu
from megatron.core.transformer.module import Float16Module
from megatron.core.distributed import DistributedDataParallel as LocalDDP
from megatron.training.utils import print_rank_0, unwrap_model
def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0):
"""given TP,DP,PP rank to get the global rank."""
args = get_args()
tp_size = mpu.get_tensor_model_parallel_world_size()
dp_size = mpu.get_data_parallel_world_size()
pp_size = mpu.get_pipeline_model_parallel_world_size()
assert (tp_size * dp_size * pp_size == torch.distributed.get_world_size()
), f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}"
if args.switch_dp_and_pp_grouping:
# TP-PP-DP grouping
return (dp_rank * pp_size + pp_rank) * tp_size + tp_rank
else:
# TP-DP-PP grouping
return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank
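# Example (illustrative): with tp_size = 2, dp_size = 2, pp_size = 2 and the default
# TP-DP-PP grouping, (tp_rank=1, dp_rank=0, pp_rank=1) maps to global rank
# (1 * 2 + 0) * 2 + 1 = 5.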
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
import megatron
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
args = megatron.get_args()
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) +
pp_rank_idx * num_layers_per_model)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
def merge_megatron_ckpt_llama(wrapped_models, config, is_value_model=False, dtype='bf16'):
"""Merge sharded parameters of a Megatron module into a merged checkpoint.
Args:
wrapped_models (list of megatron.core.distributed.DistributedDataParallel):
The local DDP wrapped megatron modules.
dtype (str or None):
The data type of state_dict. if None, the data type of the original parameters
is used.
gpt_model_key: key to access model
Returns:
state_dict (dict):
The merged state_dict in rank 0, and an empty dictionary in other ranks.
"""
start_time = time.time()
args = megatron.get_args()
def _get_gpt_model(model):
return model
dp_rank = mpu.get_data_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
pp_rank = mpu.get_pipeline_model_parallel_rank()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if dist.get_rank() == 0:
        assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, (list, tuple)):
wrapped_models = list(wrapped_models)
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
assert len(models[i].model.layers
) == num_layers_per_model, 'len model layers {} not equal to num_layers_per_model {}'.format(
len(models[i].model.layers), num_layers_per_model)
state_dict = dict()
def _get_cpu_tensor(tensor: torch.Tensor):
if tensor is None:
return None
if tensor.device == torch.device("cpu"):
return tensor.detach().clone()
return tensor.detach().cpu()
def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor across mp_group"""
nonlocal state_dict
nonlocal mp_group
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
if tensor is None:
weight = None
tensor_shape = None
else:
weight = tensor
tensor_shape = weight.shape
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not exist, skip collect")
return
if weight is None:
weight = torch.empty(
tensor_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
dist.broadcast(weight, src=src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
state_dict[name] = _get_cpu_tensor(weight)
def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=concat_dim)
if mutate_func is not None:
full_tensor = mutate_func(full_tensor)
state_dict[name] = full_tensor
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_list = []
up_weight_list = []
for i in range(tp_size):
gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i:intermediate_size_tp * 2 * (i + 1)]
gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
gate_weight_list.append(gate_weight_tp)
up_weight_list.append(up_weight_tp)
state_dict[gate_name] = torch.cat(gate_weight_list, dim=0)
state_dict[up_name] = torch.cat(up_weight_list, dim=0)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank):
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
q_weight_list = []
k_weight_list = []
v_weight_list = []
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size:(i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp:q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp:total_size]
q_weight_list.append(q_part)
k_weight_list.append(k_part)
v_weight_list.append(v_part)
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size:(i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp:q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp:total_size]
q_weight_list.append(q_part)
if i * config.num_key_value_heads % tp_size == 0:
k_weight_list.append(k_part)
v_weight_list.append(v_part)
state_dict[q_name] = torch.cat(q_weight_list, dim=0)
state_dict[k_name] = torch.cat(k_weight_list, dim=0)
state_dict[v_name] = torch.cat(v_weight_list, dim=0)
# empty cache before collecting weights
torch.cuda.empty_cache()
# Embeddings
# -------------------
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("collecting embeddings...")
gpt_model_module = _get_gpt_model(models[0])
_broadcast_tp_shard_tensor(
gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None,
"model.embed_tokens.weight",
src_pp_rank=0,
)
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"collecting layer #{layer}...")
layer_name = f"model.layers.{layer}"
src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[src_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight,
f"{layer_name}.input_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight,
f"{layer_name}.self_attn.o_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight,
f"{layer_name}.post_attention_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_gate_up(sync_layer.mlp.gate_up_proj.weight,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
src_pp_rank=src_pp_rank)
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight,
f"{layer_name}.mlp.down_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
# Final Layernorm
# -------------------
print_rank_0("collecting final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
src_pp_rank=pp_size - 1,
)
print_rank_0("collecting lm_head...")
if is_value_model:
_broadcast_tensor(getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
"reward_head.weight",
src_pp_rank=pp_size - 1)
else:
_broadcast_tp_shard_tensor(
getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
"lm_head.weight",
src_pp_rank=pp_size - 1,
)
dist.barrier()
torch.cuda.empty_cache()
if torch.distributed.get_rank() == 0:
if dtype == "fp16":
dtype = torch.float16
elif dtype == "bf16":
dtype = torch.bfloat16
elif dtype is None or dtype == "fp32":
dtype = torch.float32
else:
            print(f'Unknown/unsupported dtype to save: {dtype}')
exit(1)
for k, v in state_dict.items():
if dtype != v.dtype:
state_dict[k] = v.to(dtype)
print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
return state_dict
================================================
FILE: verl/models/llama/megatron/layers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .parallel_attention import ParallelLlamaAttention
from .parallel_decoder import ParallelLlamaDecoderLayer, ParallelLlamaDecoderLayerRmPad
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm
================================================
FILE: verl/models/llama/megatron/layers/parallel_attention.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import torch
from megatron.core import parallel_state as mpu
from megatron.core import tensor_parallel
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig
from verl.models.llama.megatron.layers.parallel_linear import QKVParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class LlamaRotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base**(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
# Build here to make `torch.jit.trace` work.
self._set_cos_sin_cache(seq_len=max_position_embeddings,
device=self.inv_freq.device,
dtype=torch.get_default_dtype())
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
if seq_len > self.max_seq_len_cached:
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
return (
self.cos_cached[:seq_len].to(dtype=x.dtype),
self.sin_cached[:seq_len].to(dtype=x.dtype),
)
class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
"""LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
t = t / self.scaling_factor
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
"""LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
if seq_len > self.max_position_embeddings:
base = self.base * ((self.scaling_factor * seq_len / self.max_position_embeddings) -
(self.scaling_factor - 1))**(self.dim / (self.dim - 2))
inv_freq = 1.0 / (base**(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
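# Worked example (illustrative): for x = [x1, x2, x3, x4] along the last dim,
# rotate_half returns [-x3, -x4, x1, x2], i.e. the second half is negated and
# moved in front of the first half.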
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
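# Shape example (illustrative): with hidden_states of shape (2, 8, 128, 64) and
# n_rep=4, repeat_kv returns shape (2, 32, 128, 64); each of the 8 KV heads is
# repeated 4 times so GQA keys/values match the 32 query heads.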
class ParallelLlamaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
# assign values after tp
tp_size = mpu.get_tensor_model_parallel_world_size()
assert self.num_heads % tp_size == 0, f'num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}'
assert self.num_key_value_heads % tp_size == 0, \
f'num_key_value_heads must be divisible by tp_size. Got num_key_value_heads={self.num_key_value_heads}, tp_size={tp_size}'
self.num_heads_per_tp = self.num_heads // tp_size
self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size
self.hidden_size_per_tp = self.hidden_size // tp_size
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads}).")
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
assert row_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
# [self.q_size, self.k_size, self.v_size]
self.qkv_proj = QKVParallelLinear(input_size=self.hidden_size,
num_heads=self.num_heads,
num_key_value_heads=self.num_key_value_heads,
head_dim=self.head_dim,
bias=config.attention_bias,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
self.q_size = self.num_heads_per_tp * self.head_dim
self.k_size = self.num_key_value_heads_per_tp * self.head_dim
self.v_size = self.num_key_value_heads_per_tp * self.head_dim
self.o_proj = tensor_parallel.RowParallelLinear(input_size=self.num_heads * self.head_dim,
output_size=self.hidden_size,
bias=config.attention_bias,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs)
self._init_rope()
def _init_rope(self):
if self.config.rope_scaling is None:
self.rotary_emb = LlamaRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
base=self.rope_theta,
)
else:
scaling_type = self.config.rope_scaling["type"]
scaling_factor = self.config.rope_scaling["factor"]
if scaling_type == "linear":
self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
scaling_factor=scaling_factor,
base=self.rope_theta,
)
elif scaling_type == "dynamic":
self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
scaling_factor=scaling_factor,
base=self.rope_theta,
)
else:
raise ValueError(f"Unknown RoPE scaling type {scaling_type}")
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)
query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len):
raise ValueError(
f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, but is"
f" {attn_weights.size()}")
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}")
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, but is"
f" {attn_output.size()}")
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)
attn_output = self.o_proj(attn_output)[0]
return attn_output
"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""
from transformers.utils import is_flash_attn_2_available
import torch.nn.functional as F
from einops import rearrange
if is_flash_attn_2_available():
from flash_attn import flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
batch_size = position_ids.shape[0]
q = pad_input(q, indices, batch_size, sequence_length) # (batch_size, seqlen, num_head, head_dim)
k = pad_input(k, indices, batch_size, sequence_length)
cos = cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
sin = sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)
return q_embed, k_embed
from flash_attn.layers.rotary import apply_rotary_emb
# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
q_embed = apply_rotary_emb(q,
cos,
sin,
interleaved=False,
inplace=False,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen)
k_embed = apply_rotary_emb(k,
cos,
sin,
interleaved=False,
inplace=False,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen)
return q_embed, k_embed
class ParallelLlamaAttentionRmPad(ParallelLlamaAttention):
def forward(self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None):
total_nnz, _, _ = hidden_states.size() # This is the total_nnz padded after sequence parallel
if self.megatron_config.sequence_parallel:
total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size],
dim=-1) # (total_nnz, 1, hidden_size)
if self.megatron_config.sequence_parallel:
sequence_parallel_pad = total_nnz - cu_seqlens[-1]
total_nnz = cu_seqlens[-1] # total_nnz before sp padding
query_states = query_states[:total_nnz]
key_states = key_states[:total_nnz]
value_states = value_states[:total_nnz]
# Flash attention requires the input to have the shape
# total_nnz x num_heads x head_dim,
# so we keep the unpadded (varlen) layout here
query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
cos, sin = cos[:, :cos.shape[1] // 2], sin[:, :sin.shape[1] // 2] # flash attn only needs half
query_states, key_states = apply_rotary_pos_emb_rmpad_flash(query_states,
key_states,
cos,
sin,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen_in_batch)
# query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin, position_ids, indices,
# TODO: llama does not have dropout in the config??
# It is recommended to use dropout with FA according to the docs
# when training.
dropout_rate = 0.0 # if not self.training else self.attn_dropout
# In PEFT, the layer norms are usually cast to float32 for training stability,
# so the input hidden states may get silently cast to float32. Hence, we
# cast them back to float16 just to be sure everything works as expected.
# This might slow down training & inference, so it is recommended not to cast
# the LayerNorms to fp32. (LlamaRMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
query_states = query_states.to(torch.float16)
key_states = key_states.to(torch.float16)
value_states = value_states.to(torch.float16)
attn_output_unpad = flash_attn_varlen_func(
query_states,
key_states,
value_states,
cu_seqlens_q=cu_seqlens,
cu_seqlens_k=cu_seqlens,
max_seqlen_q=max_seqlen_in_batch,
max_seqlen_k=max_seqlen_in_batch,
dropout_p=dropout_rate,
softmax_scale=None,
causal=True,
)
attn_output_unpad = attn_output_unpad.to(input_dtype)
attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()
# sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled
# Here we need to repad
if self.megatron_config.sequence_parallel:
attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))
attn_output_unpad = self.o_proj(attn_output_unpad)[0]
return attn_output_unpad
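# Illustrative note (not part of the original file): the cu_seqlens consumed by
# flash_attn_varlen_func above are cumulative sequence lengths as produced by
# flash_attn.bert_padding.unpad_input, e.g. for a batch of two sequences of
# lengths 3 and 5: cu_seqlens = tensor([0, 3, 8]), max_seqlen_in_batch = 5.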
================================================
FILE: verl/models/llama/megatron/layers/parallel_decoder.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import LlamaConfig
from megatron.core import ModelParallelConfig
from .parallel_attention import ParallelLlamaAttention, ParallelLlamaAttentionRmPad
from .parallel_mlp import ParallelLlamaMLP
from .parallel_rmsnorm import ParallelLlamaRMSNorm
class ParallelLlamaDecoderLayer(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ParallelLlamaAttention(config=config, megatron_config=megatron_config)
self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Note: sequence parallel is hidden inside ColumnParallelLinear
# reduce scatter is hidden inside RowParallelLinear
# Self Attention
hidden_states = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
# TODO: add sequence parallel operator all_gather here
hidden_states = self.mlp(hidden_states)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
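# Dataflow sketch (illustrative): the layer above is the standard pre-norm
# residual block,
#   h = x + SelfAttn(RMSNorm(x))
#   y = h + MLP(RMSNorm(h))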
class ParallelLlamaDecoderLayerRmPad(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.hidden_size = config.hidden_size
self.self_attn = ParallelLlamaAttentionRmPad(config=config, megatron_config=megatron_config)
self.mlp = ParallelLlamaMLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states # (total_nnz // sp, 1, hidden_size)
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
# (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size)
# -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size)
hidden_states = self.self_attn(hidden_states=hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = residual + hidden_states
# Fully Connected
# shape changes same as attn
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
================================================
FILE: verl/models/llama/megatron/layers/parallel_linear.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py
from typing import Optional, Tuple
from megatron.core import tensor_parallel
class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(self,
input_size,
num_heads,
num_key_value_heads,
head_dim,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs):
# Keep the input parameters; the per-rank head split is handled inside ColumnParallelLinear
self.input_size = input_size
self.q_output_size = num_heads * head_dim
self.kv_output_size = num_key_value_heads * head_dim
self.head_dim = head_dim
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim
super().__init__(input_size=input_size,
output_size=output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs)
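# Worked example (illustrative): for a GQA config with num_heads=32,
# num_key_value_heads=8 and head_dim=128, the fused output size is
# (32 + 2 * 8) * 128 = 6144 columns, which ColumnParallelLinear then
# splits across tp ranks.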
class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(self,
input_size,
gate_output_size,
up_output_size,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs):
# Keep the input parameters; the per-rank split is handled inside ColumnParallelLinear
self.input_size = input_size
self.output_size = gate_output_size + up_output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
super().__init__(input_size=self.input_size,
output_size=self.output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs)
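# Usage note (illustrative): ParallelLlamaMLP instantiates this class with
# gate_output_size = up_output_size = intermediate_size, so a single fused GEMM
# produces both the gate and up projections and the caller splits the result
# along the last dimension.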
================================================
FILE: verl/models/llama/megatron/layers/parallel_mlp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.core import parallel_state as mpu
from megatron.core import tensor_parallel
from megatron.core import ModelParallelConfig
from torch import nn
from transformers.activations import ACT2FN
from verl.models.llama.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class ParallelLlamaMLP(nn.Module):
def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
# The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
assert row_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_size = mpu.get_tensor_model_parallel_world_size()
self.gate_up_proj = MergedColumnParallelLinear(
input_size=self.hidden_size,
gate_output_size=self.intermediate_size,
up_output_size=self.intermediate_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
self.gate_size = self.intermediate_size // tp_size
self.down_proj = tensor_parallel.RowParallelLinear(input_size=self.intermediate_size,
output_size=self.hidden_size,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
gate_up = self.gate_up_proj(x)[0]
gate, up = gate_up.split(self.gate_size, dim=-1)
return self.down_proj(self.act_fn(gate) * up)[0]
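# Computation sketch (illustrative): with hidden_act = "silu" this forward
# implements the SwiGLU MLP used by LLaMA,
#   y = W_down(SiLU(W_gate x) * (W_up x)),
# where W_gate/W_up live in the fused gate_up_proj and W_down is down_proj.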
================================================
FILE: verl/models/llama/megatron/layers/parallel_rmsnorm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import LlamaConfig
from apex.normalization.fused_layer_norm import fused_rms_norm_affine
from verl.utils.megatron import sequence_parallel as sp_utils
class ParallelLlamaRMSNorm(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
"""
LlamaRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
if isinstance(config.hidden_size, numbers.Integral):
normalized_shape = (config.hidden_size,)
self.normalized_shape = torch.Size(normalized_shape)
self.weight = nn.Parameter(torch.ones(self.normalized_shape))
self.variance_epsilon = config.rms_norm_eps
if megatron_config.sequence_parallel:
sp_utils.mark_parameter_as_sequence_parallel(self.weight)
def forward(self, hidden_states):
return fused_rms_norm_affine(input=hidden_states,
weight=self.weight,
normalized_shape=self.normalized_shape,
eps=self.variance_epsilon,
memory_efficient=True)
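# Math sketch (illustrative): fused_rms_norm_affine computes
#   y = weight * x / sqrt(mean(x^2, dim=-1) + eps),
# i.e. RMSNorm without mean subtraction, matching HF's LlamaRMSNorm.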
================================================
FILE: verl/models/llama/megatron/modeling_llama_megatron.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LLaMA model with Megatron-style acceleration."""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from megatron.core import tensor_parallel
from megatron.core import ModelParallelConfig
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import CausalLMOutputWithPast
from verl.utils.megatron import sequence_parallel as sp_utils
from verl.utils.megatron import tensor_parallel as tp_utils
from .layers import ParallelLlamaDecoderLayer, ParallelLlamaRMSNorm, ParallelLlamaDecoderLayerRmPad
"""
TODO:
1. Add weight initialization. Here we need to be careful on TP weight init.
2. Add sequence parallel
3. Load checkpoint from Meta's pretrained LLaMA checkpoint
"""
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
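# Worked example (illustrative): for tgt_len = 3 the mask rows are
#   [0, -inf, -inf]
#   [0,    0, -inf]
#   [0,    0,    0]
# (with -inf being torch.finfo(dtype).min), added to the attention scores.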
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
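# Worked example (illustrative): a padding mask row [1, 1, 0] expands, for each
# query position, to [0, 0, torch.finfo(dtype).min], masking the padded key.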
class ParallelLlamaModel(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
self.megatron_config = megatron_config
if megatron_config is not None:
assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
self.layers = nn.ModuleList(
[ParallelLlamaDecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)])
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype,
tgt_len=input_shape[-1]).to(inputs_embeds.device)
combined_attention_mask = (expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask +
combined_attention_mask)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
input_ids: input ids. shape (batch_size, seq_length)
attention_mask: attention_mask. shape (batch_size, seq_length)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
batch_size, seq_length = input_ids.shape
inputs_embeds = self.embed_tokens(input_ids)
# embed positions
attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelLlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.model = ParallelLlamaModel(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
self.megatron_config = megatron_config
if megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = outputs
logits = self.lm_head(hidden_states)[0]
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
logits = logits.float()
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
class ParallelLlamaModelRmPad(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
self.megatron_config = megatron_config
if megatron_config is not None:
assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
self.layers = nn.ModuleList(
[ParallelLlamaDecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)])
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
def forward(self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelLlamaForCausalLMRmPad(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.model = ParallelLlamaModelRmPad(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
self._init_head()
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=self.config.hidden_size,
output_size=self.config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
logits = self.lm_head(hidden_states)[0]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(input_ids.unsqueeze(dim=-1),
attention_mask) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
# TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
if self.megatron_config.sequence_parallel:
input_ids = sp_utils.pad_to_sequence_parallel(input_ids)
input_ids = input_ids.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(input_ids=input_ids,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = outputs
logits = self._forward_head(hidden_states)
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
total_nnz = cu_seqlens[-1]
logits = logits[:total_nnz] # (total_nnz_padded)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension
# add removed padding back
logits = pad_input(logits, indices, batch_size,
seqlen=sequence_length) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
class ParallelLlamaForValueRmPad(ParallelLlamaForCausalLMRmPad):
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=self.config.hidden_size, out_features=1, bias=False)
# the value head operates on sequence-parallel activations, so mark its weight as sequence-parallel for correct grad sync
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output = super().forward(input_ids, attention_mask, position_ids)
output.logits = torch.squeeze(output.logits, dim=-1)
return output
"""
Support pipeline parallelism
"""
class ParallelLlamaModelRmPadPP(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
This model definition supports pipeline parallelism. To support pp and vpp,
- This model only contains the layers of this pp stage and vpp chunk.
- When calling get_model in Megatron, this rank will instantiate all the vpp chunks of this pp stage.
Args:
config: LlamaConfig
"""
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
self.megatron_config = megatron_config
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
if megatron_config is not None:
assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
if pre_process:
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
else:
self.embed_tokens = None
# pp_rank = megatron_config.pipeline_model_parallel_rank
pp_size = megatron_config.pipeline_model_parallel_size
self.num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = megatron_config.virtual_pipeline_model_parallel_size
if vpp_size is not None:
self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size
self.num_layer_this_model = self.num_layer_vpp_chunk
# vpp_rank = megatron_config.virtual_pipeline_model_parallel_rank
# self.offset = vpp_rank * (
# config.num_hidden_layers // megatron_config.virtual_pipeline_model_parallel_size) + \
# (megatron_config.pipeline_model_parallel_rank * self.num_layer_vpp_chunk)
else:
self.num_layer_this_model = self.num_layer_per_pp
# self.offset = pp_rank * self.num_layer_per_pp
layers = []
for i in range(self.num_layer_this_model):
layer = ParallelLlamaDecoderLayerRmPad(config, megatron_config)
# setattr(layer, 'hidden_layer_index', self.offset + i)
layers.append(layer)
self.layers = nn.ModuleList(layers)
if post_process:
self.norm = ParallelLlamaRMSNorm(config, megatron_config)
else:
self.norm = None
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: int = None,
max_seqlen_in_batch: int = None) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
if self.pre_process:
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# vocab parallel embedding will not do sequence parallel reduce-scatter in open-source Megatron,
# so we need to handle it here:
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
else:
# self.hidden_states should be passed by Megatron
hidden_states = self.input_tensor
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = layer_outputs
if self.post_process:
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelLlamaForCausalLMRmPadPP(nn.Module):
def __init__(self, config: LlamaConfig, megatron_config: ModelParallelConfig, pre_process, post_process,
share_embeddings_and_output_weights):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.model = ParallelLlamaModelRmPadPP(config,
megatron_config=megatron_config,
pre_process=pre_process,
post_process=post_process)
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
if post_process:
self._init_head()
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
assert len(input_tensor) == 1
self.model.set_input_tensor(input_tensor[0])
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=self.config.hidden_size,
output_size=self.config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
# logits shape before forward_head hidden_states.shape: [4, 32, 4096]
logits = self.lm_head(hidden_states)[0]
# logits shape after forward_head logits.shape: [8, 32, 8]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
return logits
def forward(
self,
# original input
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
# Note that input_ids, attention_mask and position_ids should be passed to every pp stage.
# In the first pp stage input_ids is used; in later stages the hidden_states received from the previous stage are used inside self.model
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(input_ids.unsqueeze(dim=-1),
attention_mask) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
# TODO: for better performance, the sp padding should be removed at each layer. Not sure the performance gap
if self.megatron_config.sequence_parallel:
input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(input_ids=input_ids_rmpad,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
if self.post_process:
hidden_states = outputs
# print(f'hidden_states.shape = {hidden_states.shape}') # torch.Size([4, 32, 4096])
logits = self._forward_head(hidden_states)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # torch.Size([8, 32, 16])
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
total_nnz = cu_seqlens[-1]
logits = logits[:total_nnz] # (total_nnz_padded)
# add removed padding back. If input is already rmpad, we let the caller pad_input
logits = pad_input(logits, indices, batch_size,
seqlen=sequence_length) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
else:
return outputs
class ParallelLlamaForValueRmPadPP(ParallelLlamaForCausalLMRmPadPP):
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=self.config.hidden_size, out_features=1, bias=False)
# the value head operates on sequence-parallel activations, so mark its weight as sequence-parallel for correct grad sync
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
if self.post_process:
output.logits = torch.squeeze(output.logits, dim=-1)
return output
else:
return output
================================================
FILE: verl/models/qwen2/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/qwen2/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .modeling_qwen2_megatron import (
# original model with megatron
ParallelQwen2Model,
ParallelQwen2ForCausalLM,
# rmpad with megatron
ParallelQwen2ForCausalLMRmPad,
ParallelQwen2ForValueRmPad,
# rmpad with megatron and pipeline parallelism
ParallelQwen2ForCausalLMRmPadPP,
ParallelQwen2ForValueRmPadPP)
================================================
FILE: verl/models/qwen2/megatron/checkpoint_utils/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/qwen2/megatron/checkpoint_utils/qwen2_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import time
from typing import Dict, Any, Callable, Optional
import torch.distributed as dist
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
import megatron
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) +
pp_rank_idx * num_layers_per_model)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
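# Worked example (illustrative): with num_hidden_layers=24, pp_size=2 and
# virtual_pp_size=2, num_layers_per_model = 6 and global layer 7 maps to
# (pp_rank=1, virtual_pp_rank=0, local layer_idx=1).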
def load_state_dict_to_megatron_qwen2(state_dict, wrapped_models, config, params_dtype, is_value_model=False):
"""Load merged state_dict to sharded Megatron module in training.
"""
import megatron
from megatron.core import mpu
from megatron.training.utils import print_rank_0, unwrap_model
from megatron.core.transformer.module import Float16Module
from megatron.core import DistributedDataParallel as LocalDDP
from torch.nn.parallel import DistributedDataParallel as torchDDP
start_time = time.time()
def _get_gpt_model(model):
return model
def broadcast_params(module):
for param in module.parameters():
torch.distributed.broadcast(param.data,
src=mpu.get_data_parallel_src_rank(),
group=mpu.get_data_parallel_group())
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if torch.distributed.get_rank() == 0:
assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, (list, tuple)):
wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
gpt_model_module = _get_gpt_model(models[i])
assert len(gpt_model_module.model.layers) == num_layers_per_model
def _broadcast_tensor(tensor, name) -> torch.Tensor:
"""broadcast tensor from rank0 across mp_group"""
nonlocal state_dict
nonlocal mp_group
if torch.distributed.get_rank() == 0:
if name in state_dict:
weight = state_dict[name]
tensor_shape = weight.shape
else:
tensor_shape = None
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not in state_dict, skip load")
return
if tensor is None:
tensor = torch.empty(
tensor_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
if torch.distributed.get_rank() == 0:
tensor.data.copy_(weight)
dist.broadcast(tensor, src=0, group=mp_group)
def _broadcast_tp_shard_tensor_vocab(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor(tensor, name, chunk_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
if name in state_dict:
full_weight = state_dict[name]
if mutate_func is not None:
full_weight = mutate_func(full_weight)
tensor_chunk = torch.chunk(full_weight, tp_size, dim=chunk_dim)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
gate_weight = state_dict[gate_name]
up_weight = state_dict[up_name]
new_gate_up_weight = torch.empty(config.intermediate_size * 2,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
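# Fused layout sketch: block i (rows [2*i*intermediate_size_tp, 2*(i+1)*intermediate_size_tp))
# holds [gate_tp_i; up_tp_i], so chunking along dim 0 by tp_size below hands
# each TP rank its own (gate, up) shard pair.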
for i in range(tp_size):
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_tp = gate_weight[i * intermediate_size_tp:(i + 1) * intermediate_size_tp]
up_weight_tp = up_weight[i * intermediate_size_tp:(i + 1) * intermediate_size_tp]
new_gate_up_weight[intermediate_size_tp * 2 * i:intermediate_size_tp * 2 * (i + 1)].copy_(
torch.cat([gate_weight_tp, up_weight_tp], dim=0))
tensor_chunk = torch.chunk(new_gate_up_weight, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (
tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {gate_name, up_name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, bias=False) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
if torch.distributed.get_rank() == 0:
assert (q_name in state_dict and k_name in state_dict and v_name in state_dict)
full_weight_q = state_dict[q_name]
full_weight_k = state_dict[k_name]
full_weight_v = state_dict[v_name]
hidden_size_per_head = config.hidden_size // config.num_attention_heads
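# Two fused-QKV layouts follow: if num_key_value_heads >= tp_size, each TP rank
# owns num_key_value_heads // tp_size KV heads; otherwise each KV head is
# replicated so that every rank's shard still carries one full (k, v) block
# alongside its q shard.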
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(total_size * tp_size,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
else:
new_weight_qkv = torch.empty(total_size * tp_size,
dtype=params_dtype,
device=torch.cuda.current_device())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp:(i + 1) * q_size_tp]
k_part = full_weight_k[i * kv_size_tp:(i + 1) * kv_size_tp]
v_part = full_weight_v[i * kv_size_tp:(i + 1) * kv_size_tp]
new_weight_qkv[i * total_size:(i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part],
dim=0))
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
if not bias:
new_weight_qkv = torch.empty(total_size * tp_size,
config.hidden_size,
dtype=params_dtype,
device=torch.cuda.current_device())
else:
new_weight_qkv = torch.empty(total_size * tp_size,
dtype=params_dtype,
device=torch.cuda.current_device())
for i in range(tp_size):
q_part = full_weight_q[i * q_size_tp:(i + 1) * q_size_tp]
start_idx = i * config.num_key_value_heads // tp_size * hidden_size_per_head
end_idx = (i * config.num_key_value_heads // tp_size + 1) * hidden_size_per_head
k_part = full_weight_k[start_idx:end_idx]
v_part = full_weight_v[start_idx:end_idx]
new_weight_qkv[i * total_size:(i + 1) * total_size].copy_(torch.cat([q_part, k_part, v_part],
dim=0))
tensor_chunk = torch.chunk(new_weight_qkv, tp_size, dim=0)
chunk_shape = tensor_chunk[0].shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=0, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not in state_dict, skip loading")
return
if tensor is None:
sync_tensor = torch.empty(
chunk_shape,
dtype=params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
else:
assert (tensor.shape == chunk_shape
), f"rank #{torch.distributed.get_rank()} tensor {q_name} shape {tensor.shape} != {chunk_shape}"
sync_tensor = torch.empty_like(tensor, device=torch.cuda.current_device(), requires_grad=False)
for i in range(tp_size):
if torch.distributed.get_rank() == 0:
sync_tensor.data.copy_(tensor_chunk[i])
dist.broadcast(sync_tensor, src=0, group=mp_group)
if (i == tp_rank) and (tensor is not None):
tensor.data.copy_(sync_tensor)
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("loading embeddings...")
gpt_model_module = _get_gpt_model(models[0])
embed_tokens_weight = None
if pp_rank == 0:
embed_tokens_weight = gpt_model_module.model.embed_tokens.weight
_broadcast_tp_shard_tensor_vocab(embed_tokens_weight, "model.embed_tokens.weight")
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"loading layer #{layer}...")
layer_name = f"model.layers.{layer}"
dst_pp_rank, dst_virtual_pp_rank, dst_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[dst_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[dst_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.input_layernorm.weight",
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
)
_broadcast_tp_shard_tensor_qkv(sync_layer.self_attn.qkv_proj.bias if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.q_proj.bias",
f"{layer_name}.self_attn.k_proj.bias",
f"{layer_name}.self_attn.v_proj.bias",
bias=True)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.self_attn.o_proj.weight",
chunk_dim=1,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.post_attention_layernorm.weight",
)
_broadcast_tp_shard_tensor_gate_up(sync_layer.mlp.gate_up_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.gate_proj.weight", f"{layer_name}.mlp.up_proj.weight")
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight if dst_pp_rank == pp_rank else None,
f"{layer_name}.mlp.down_proj.weight",
chunk_dim=1,
)
# Final Layernorm
# -------------------
print_rank_0("loading final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
)
print_rank_0("loading lm_head...")
lm_head_weight = None
if pp_rank + 1 == pp_size:
lm_head_weight = gpt_model_module.lm_head.weight
if is_value_model:
# if torch.distributed.get_rank() == 0:
if 'lm_head.weight' in state_dict and state_dict['lm_head.weight'].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "lm_head.weight")
elif 'reward_head.weight' in state_dict and state_dict['reward_head.weight'].shape[0] == 1:
_broadcast_tensor(lm_head_weight, "reward_head.weight")
print_rank_0('loaded lm_head from value_head weight')
else:
_broadcast_tensor(None, "lm_head.weight")
print_rank_0('failed to match lm_head in value_model')
# else:
# _broadcast_tensor(lm_head_weight, "lm_head.weight")
else:
_broadcast_tp_shard_tensor(lm_head_weight, "lm_head.weight")
dist.barrier()
# Broadcast weights inside data parallel groups
for wrapped_model in wrapped_models:
broadcast_params(wrapped_model)
torch.cuda.empty_cache()
print_rank_0(f"loading megatron ckpt done, time elapsed {time.time() - start_time}s")
================================================
FILE: verl/models/qwen2/megatron/checkpoint_utils/qwen2_saver.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import megatron
from megatron.core import mpu
from megatron.training.utils import print_rank_0, unwrap_model
from megatron.core.transformer.module import Float16Module
from megatron.core.distributed import DistributedDataParallel as LocalDDP
from torch.nn.parallel import DistributedDataParallel as torchDDP
import torch
import time
from typing import Optional
import torch.distributed as dist
from megatron import get_args
def _megatron_calc_global_rank(tp_rank: int = 0, dp_rank: int = 0, pp_rank: int = 0):
"""given TP,DP,PP rank to get the global rank."""
args = get_args()
tp_size = mpu.get_tensor_model_parallel_world_size()
dp_size = mpu.get_data_parallel_world_size()
pp_size = mpu.get_pipeline_model_parallel_world_size()
assert (tp_size * dp_size * pp_size == torch.distributed.get_world_size()
), f"{tp_size} x {dp_size} x {pp_size} != {torch.distributed.get_world_size()}"
if args.switch_dp_and_pp_grouping:
# TP-PP-DP grouping
return (dp_rank * pp_size + pp_rank) * tp_size + tp_rank
else:
# TP-DP-PP grouping
return (pp_rank * dp_size + dp_rank) * tp_size + tp_rank
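# Worked example (assumed sizes): with tp_size=dp_size=pp_size=2, the tuple
# (tp_rank=1, dp_rank=0, pp_rank=1) maps to (1*2+0)*2+1 = rank 5 under
# TP-DP-PP grouping, but to (0*2+1)*2+1 = rank 3 under TP-PP-DP grouping.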
def _megatron_calc_layer_map(config):
"""Calculate the mapping of global layer_idx to local layer_idx
Returns:
layer_map (Dict: int -> tuple(int, int, int)):
mapping from the global layer index to
a tuple of (pp_rank, virtual_pp_rank, layer_idx inside model)
"""
import megatron
from megatron.core import mpu
pp_size = mpu.get_pipeline_model_parallel_world_size()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
args = megatron.get_args()
layer_map = dict()
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
for pp_rank_idx in range(pp_size):
for virtual_pp_rank_idx in range(virtual_pp_size):
layer_offset = (virtual_pp_rank_idx * (config.num_hidden_layers // virtual_pp_size) +
pp_rank_idx * num_layers_per_model)
for layer_idx in range(num_layers_per_model):
layer_map[layer_offset + layer_idx] = (
pp_rank_idx,
virtual_pp_rank_idx,
layer_idx,
)
return layer_map
def merge_megatron_ckpt_qwen2(wrapped_models, config, is_value_model=False, dtype='bf16'):
"""Merge sharded parameters of a Megatron module into a merged checkpoint.
Args:
wrapped_modelss (list of megatron.core.distributed.DistributedDataParallel):
The local DDP wrapped megatron modules.
dtype (str or None):
The data type of state_dict. if None, the data type of the original parameters
is used.
gpt_model_key: key to access model
Returns:
state_dict (dict):
The merged state_dict in rank 0, and an empty dictionary in other ranks.
"""
start_time = time.time()
args = megatron.get_args()
def _get_gpt_model(model):
return model
dp_rank = mpu.get_data_parallel_rank()
pp_size = mpu.get_pipeline_model_parallel_world_size()
pp_rank = mpu.get_pipeline_model_parallel_rank()
virtual_pp_size = mpu.get_virtual_pipeline_model_parallel_world_size() or 1
mp_group = mpu.get_model_parallel_group()
if dist.get_rank() == 0:
assert mp_group.rank() == 0, f"mp_rank:[{mp_group.rank()}] != 0 on rank #0"
assert pp_rank == 0, f"pp_rank:[{pp_rank}] != 0 on rank #0"
assert dp_rank == 0, f"dp_rank:[{dp_rank}] != 0 on rank #0"
if not isinstance(wrapped_models, (list, tuple)):
wrapped_models = [wrapped_models]
assert len(wrapped_models) == virtual_pp_size
num_layers_per_model = config.num_hidden_layers // pp_size // virtual_pp_size
assert num_layers_per_model * pp_size * virtual_pp_size == config.num_hidden_layers
models = [None] * len(wrapped_models)
for i, wrapped_model in enumerate(wrapped_models):
models[i] = unwrap_model(wrapped_model, (torchDDP, LocalDDP, Float16Module))
assert len(models[i].model.layers
) == num_layers_per_model, 'len model layers {} not equal to num_layers_per_model {}'.format(
len(models[i].model.layers), num_layers_per_model)
state_dict = dict()
def _get_cpu_tensor(tensor: torch.Tensor):
if tensor is None:
return None
if tensor.device == torch.device("cpu"):
return tensor.detach().clone()
return tensor.detach().cpu()
def _broadcast_tensor(tensor, name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor across mp_group"""
nonlocal state_dict
nonlocal mp_group
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
if tensor is None:
weight = None
tensor_shape = None
else:
weight = tensor
tensor_shape = weight.shape
else:
weight = None
tensor_shape = None
obj_list = [tensor_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
tensor_shape = obj_list[0]
if tensor_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tensor:[{name}] not exist, skip collect")
return
if weight is None:
weight = torch.empty(
tensor_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
dist.broadcast(weight, src=src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
state_dict[name] = _get_cpu_tensor(weight)
def _broadcast_tp_shard_tensor(tensor, name, src_pp_rank, concat_dim=0, mutate_func=None) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=concat_dim)
if mutate_func is not None:
full_tensor = mutate_func(full_tensor)
state_dict[name] = full_tensor
def _broadcast_tp_shard_tensor_gate_up(tensor, gate_name, up_name, src_pp_rank) -> torch.Tensor:
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{gate_name, up_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
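# Undo the fused per-rank layout: each block of 2*intermediate_size_tp rows is
# [gate_tp_i; up_tp_i]; split below and re-concatenate into full gate/up weights.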
intermediate_size_tp = config.intermediate_size // tp_size
gate_weight_list = []
up_weight_list = []
for i in range(tp_size):
gate_up_weight_tp = full_tensor[intermediate_size_tp * 2 * i:intermediate_size_tp * 2 * (i + 1)]
gate_weight_tp = gate_up_weight_tp[:intermediate_size_tp]
up_weight_tp = gate_up_weight_tp[intermediate_size_tp:]
gate_weight_list.append(gate_weight_tp)
up_weight_list.append(up_weight_tp)
state_dict[gate_name] = torch.cat(gate_weight_list, dim=0)
state_dict[up_name] = torch.cat(up_weight_list, dim=0)
def _broadcast_tp_shard_tensor_qkv(tensor, q_name, k_name, v_name, src_pp_rank):
"""broadcast tensor in tp shards across mp_group"""
nonlocal state_dict
nonlocal mp_group
tp_rank = mpu.get_tensor_model_parallel_rank()
tp_size = mpu.get_tensor_model_parallel_world_size()
src_rank = _megatron_calc_global_rank(tp_rank=0, dp_rank=0, pp_rank=src_pp_rank)
if torch.distributed.get_rank() == src_rank:
chunk_shape = tensor.shape
else:
chunk_shape = None
obj_list = [chunk_shape]
dist.broadcast_object_list(obj_list, src=src_rank, group=mp_group)
chunk_shape = obj_list[0]
if chunk_shape is None:
# all or none ranks in the mp_group should reach here
print_rank_0(f"tp_shard tensor:[{q_name}] not exist, skip collecting")
return
buffer_tensor = torch.empty(
chunk_shape,
dtype=args.params_dtype,
device=torch.cuda.current_device(),
requires_grad=False,
)
chunk_tensors = [None] * tp_size
for i in range(tp_size):
cur_src_rank = _megatron_calc_global_rank(tp_rank=i, dp_rank=0, pp_rank=src_pp_rank)
sync_tensor = tensor if torch.distributed.get_rank() == cur_src_rank else buffer_tensor
dist.broadcast(sync_tensor, src=cur_src_rank, group=mp_group)
if torch.distributed.get_rank() == 0:
chunk_tensors[i] = _get_cpu_tensor(sync_tensor)
if torch.distributed.get_rank() == 0:
full_tensor = torch.concat(chunk_tensors, dim=0)
q_weight_list = []
k_weight_list = []
v_weight_list = []
hidden_size_per_head = config.hidden_size // config.num_attention_heads
if config.num_key_value_heads >= tp_size:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head * config.num_key_value_heads // tp_size
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size:(i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp:q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp:total_size]
q_weight_list.append(q_part)
k_weight_list.append(k_part)
v_weight_list.append(v_part)
else:
q_size_tp = config.hidden_size // tp_size
kv_size_tp = hidden_size_per_head
total_size = q_size_tp + 2 * kv_size_tp
for i in range(tp_size):
qkv_part = full_tensor[i * total_size:(i + 1) * total_size]
q_part = qkv_part[:q_size_tp]
k_part = qkv_part[q_size_tp:q_size_tp + kv_size_tp]
v_part = qkv_part[q_size_tp + kv_size_tp:total_size]
q_weight_list.append(q_part)
if i * config.num_key_value_heads % tp_size == 0:
k_weight_list.append(k_part)
v_weight_list.append(v_part)
state_dict[q_name] = torch.cat(q_weight_list, dim=0)
state_dict[k_name] = torch.cat(k_weight_list, dim=0)
state_dict[v_name] = torch.cat(v_weight_list, dim=0)
# empty cache before collecting weights
torch.cuda.empty_cache()
if dp_rank == 0:
# Embeddings
# -------------------
print_rank_0("collecting embeddings...")
gpt_model_module = _get_gpt_model(models[0])
_broadcast_tp_shard_tensor(
gpt_model_module.model.embed_tokens.weight if pp_rank == 0 else None,
"model.embed_tokens.weight",
src_pp_rank=0,
)
# Transformer layers
# -------------------
layer_map = _megatron_calc_layer_map(config)
for layer in range(config.num_hidden_layers):
print_rank_0(f"collecting layer #{layer}...")
layer_name = f"model.layers.{layer}"
src_pp_rank, src_virtual_pp_rank, src_layer_idx = layer_map[layer]
gpt_model_module = _get_gpt_model(models[src_virtual_pp_rank])
sync_layer = gpt_model_module.model.layers[src_layer_idx]
_broadcast_tensor(
sync_layer.input_layernorm.weight,
f"{layer_name}.input_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_qkv(
sync_layer.self_attn.qkv_proj.weight,
f"{layer_name}.self_attn.q_proj.weight",
f"{layer_name}.self_attn.k_proj.weight",
f"{layer_name}.self_attn.v_proj.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor(
sync_layer.self_attn.o_proj.weight,
f"{layer_name}.self_attn.o_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
_broadcast_tensor(
sync_layer.post_attention_layernorm.weight,
f"{layer_name}.post_attention_layernorm.weight",
src_pp_rank=src_pp_rank,
)
_broadcast_tp_shard_tensor_gate_up(sync_layer.mlp.gate_up_proj.weight,
f"{layer_name}.mlp.gate_proj.weight",
f"{layer_name}.mlp.up_proj.weight",
src_pp_rank=src_pp_rank)
_broadcast_tp_shard_tensor(
sync_layer.mlp.down_proj.weight,
f"{layer_name}.mlp.down_proj.weight",
concat_dim=1,
src_pp_rank=src_pp_rank,
)
# Final Layernorm
# -------------------
print_rank_0("collecting final layernorm...")
gpt_model_module = _get_gpt_model(models[-1])
_broadcast_tensor(
getattr(gpt_model_module.model.norm, "weight", None),
"model.norm.weight",
src_pp_rank=pp_size - 1,
)
print_rank_0("collecting lm_head...")
if is_value_model:
_broadcast_tensor(getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
"reward_head.weight",
src_pp_rank=pp_size - 1)
else:
_broadcast_tp_shard_tensor(
getattr(gpt_model_module.lm_head, "weight", None) if pp_rank == pp_size - 1 else None,
"lm_head.weight",
src_pp_rank=pp_size - 1,
)
dist.barrier()
torch.cuda.empty_cache()
if torch.distributed.get_rank() == 0:
if dtype == "fp16":
dtype = torch.float16
elif dtype == "bf16":
dtype = torch.bfloat16
elif dtype is None or dtype == "fp32":
dtype = torch.float32
else:
print(f'Unknown/unsupported dtype to save: {dtype}')
exit(1)
for k, v in state_dict.items():
if dtype != v.dtype:
state_dict[k] = v.to(dtype)
print_rank_0(f"merge megatron ckpt done, time elapsed {time.time() - start_time}s")
return state_dict
================================================
FILE: verl/models/qwen2/megatron/layers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .parallel_attention import ParallelQwen2Attention
from .parallel_decoder import ParallelQwen2DecoderLayer, ParallelQwen2DecoderLayerRmPad
from .parallel_mlp import ParallelQwen2MLP
from .parallel_rmsnorm import ParallelQwen2RMSNorm
================================================
FILE: verl/models/qwen2/megatron/layers/parallel_attention.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import torch
from megatron.core import parallel_state as mpu
from megatron.core import tensor_parallel
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import Qwen2Config
from verl.models.qwen2.megatron.layers.parallel_linear import QKVParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class Qwen2RotaryEmbedding(nn.Module):
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
super().__init__()
self.dim = dim
self.max_position_embeddings = max_position_embeddings
self.base = base
inv_freq = 1.0 / (self.base**(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
# Build here to make `torch.jit.trace` work.
self._set_cos_sin_cache(seq_len=max_position_embeddings,
device=self.inv_freq.device,
dtype=torch.get_default_dtype())
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def forward(self, x, seq_len=None):
# x: [bs, num_attention_heads, seq_len, head_size]
if seq_len > self.max_seq_len_cached:
self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
return (
self.cos_cached[:seq_len].to(dtype=x.dtype),
self.sin_cached[:seq_len].to(dtype=x.dtype),
)
class Qwen2LinearScalingRotaryEmbedding(Qwen2RotaryEmbedding):
"""Qwen2RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
t = t / self.scaling_factor
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
class Qwen2DynamicNTKScalingRotaryEmbedding(Qwen2RotaryEmbedding):
"""Qwen2RotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
self.scaling_factor = scaling_factor
super().__init__(dim, max_position_embeddings, base, device)
def _set_cos_sin_cache(self, seq_len, device, dtype):
self.max_seq_len_cached = seq_len
if seq_len > self.max_position_embeddings:
base = self.base * ((self.scaling_factor * seq_len / self.max_position_embeddings) -
(self.scaling_factor - 1))**(self.dim / (self.dim - 2))
inv_freq = 1.0 / (base**(torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.register_buffer("inv_freq", inv_freq, persistent=False)
t = torch.arange(self.max_seq_len_cached, device=device, dtype=self.inv_freq.dtype)
freqs = torch.einsum("i,j->ij", t, self.inv_freq)
# Different from paper, but it uses a different permutation in order to obtain the same calculation
emb = torch.cat((freqs, freqs), dim=-1)
self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., :x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2:]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
class ParallelQwen2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.hidden_size // self.num_heads
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
# assign values after tp
tp_size = mpu.get_tensor_model_parallel_world_size()
assert self.num_heads % tp_size == 0, f'num_head must be divisible by tp_size. Got num_head={self.num_heads}, tp_size={tp_size}'
assert self.num_key_value_heads % tp_size == 0, \
f'num_key_value_heads must be divisible by tp_size. Got num_key_value_heads={self.num_key_value_heads}, tp_size={tp_size}'
self.num_heads_per_tp = self.num_heads // tp_size
self.num_key_value_heads_per_tp = self.num_key_value_heads // tp_size
self.hidden_size_per_tp = self.hidden_size // tp_size
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads}).")
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
assert row_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
# [self.q_size, self.k_size, self.v_size]
self.qkv_proj = QKVParallelLinear(
input_size=self.hidden_size,
num_heads=self.num_heads,
num_key_value_heads=self.num_key_value_heads,
head_dim=self.head_dim,
# bias=config.attention_bias,
bias=True,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
self.q_size = self.num_heads_per_tp * self.head_dim
self.k_size = self.num_key_value_heads_per_tp * self.head_dim
self.v_size = self.num_key_value_heads_per_tp * self.head_dim
self.o_proj = tensor_parallel.RowParallelLinear(
input_size=self.num_heads * self.head_dim,
output_size=self.hidden_size,
# bias=config.attention_bias,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs)
self._init_rope()
def _init_rope(self):
self.rotary_emb = Qwen2RotaryEmbedding(
self.head_dim,
max_position_embeddings=self.max_position_embeddings,
base=self.rope_theta,
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size], dim=-1)
query_states = query_states.view(bsz, q_len, self.num_heads_per_tp, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads_per_tp, self.head_dim).transpose(1, 2)
kv_seq_len = key_states.shape[-2]
cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attn_weights.size() != (bsz, self.num_heads_per_tp, q_len, kv_seq_len):
raise ValueError(
f"Attention weights should be of size {(bsz, self.num_heads_per_tp, q_len, kv_seq_len)}, but is"
f" {attn_weights.size()}")
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}")
attn_weights = attn_weights + attention_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads_per_tp, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads_per_tp, q_len, self.head_dim)}, but is"
f" {attn_output.size()}")
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size_per_tp)
attn_output = self.o_proj(attn_output)[0]
return attn_output
"""
Remove padding Attention
- Using Flash-attn 2
- Compatible with sequence parallel
"""
from transformers.utils import is_flash_attn_2_available
import torch.nn.functional as F
from einops import rearrange
if is_flash_attn_2_available():
from flash_attn import flash_attn_varlen_func
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
def apply_rotary_pos_emb_rmpad(q, k, cos, sin, position_ids, indices, sequence_length):
batch_size = position_ids.shape[0]
q = pad_input(q, indices, batch_size, sequence_length) # (batch_size, seqlen, num_head, head_dim)
k = pad_input(k, indices, batch_size, sequence_length)
cos = cos[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
sin = sin[position_ids].unsqueeze(2) # [bs, seq_len, 1, dim]
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = index_first_axis(rearrange(q_embed, "b s ... -> (b s) ..."), indices)
k_embed = index_first_axis(rearrange(k_embed, "b s ... -> (b s) ..."), indices)
return q_embed, k_embed
from flash_attn.layers.rotary import apply_rotary_emb
# use flash-attn rotary embeddings with rmpad
# cos/sin should be: (seq_length, rotary_dim / 2)
def apply_rotary_pos_emb_rmpad_flash(q, k, cos, sin, cu_seqlens, max_seqlen):
q_embed = apply_rotary_emb(q,
cos,
sin,
interleaved=False,
inplace=False,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen)
k_embed = apply_rotary_emb(k,
cos,
sin,
interleaved=False,
inplace=False,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen)
return q_embed, k_embed
class ParallelQwen2AttentionRmPad(ParallelQwen2Attention):
def forward(self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None):
total_nnz, _, _ = hidden_states.size() # This is the total_nnz padded after sequence parallel
if self.megatron_config.sequence_parallel:
total_nnz = total_nnz * mpu.get_tensor_model_parallel_world_size()
qkv = self.qkv_proj(hidden_states)[0]
query_states, key_states, value_states = qkv.split([self.q_size, self.k_size, self.v_size],
dim=-1) # (total_nnz, 1, hidden_size)
if self.megatron_config.sequence_parallel:
sequence_parallel_pad = total_nnz - cu_seqlens[-1]
total_nnz = cu_seqlens[-1] # total_nnz before sp padding
query_states = query_states[:total_nnz]
key_states = key_states[:total_nnz]
value_states = value_states[:total_nnz]
# flash_attn_varlen_func expects unpadded inputs of shape
# (total_nnz, num_heads, head_dim), so reshape the flat hidden states accordingly
query_states = query_states.view(total_nnz, self.num_heads_per_tp, self.head_dim)
key_states = key_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
value_states = value_states.view(total_nnz, self.num_key_value_heads_per_tp, self.head_dim)
cos, sin = self.rotary_emb(value_states, seq_len=sequence_length)
cos, sin = cos[:, :cos.shape[1] // 2], sin[:, :sin.shape[1] // 2] # flash attn only needs half
query_states, key_states = apply_rotary_pos_emb_rmpad_flash(query_states,
key_states,
cos,
sin,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen_in_batch)
# query_states, key_states = apply_rotary_pos_emb_rmpad(query_states, key_states, cos, sin, position_ids, indices,
# It is recommended to use dropout with FA according to the docs
# when training.
dropout_rate = 0.0 # if not self.training else self.attn_dropout
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in float16 just to be sure everything works as expected.
# This might slowdown training & inference so it is recommended to not cast the LayerNorms
# in fp32. (Qwen2RMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
query_states = query_states.to(torch.float16)
key_states = key_states.to(torch.float16)
value_states = value_states.to(torch.float16)
attn_output_unpad = flash_attn_varlen_func(
query_states,
key_states,
value_states,
cu_seqlens_q=cu_seqlens,
cu_seqlens_k=cu_seqlens,
max_seqlen_q=max_seqlen_in_batch,
max_seqlen_k=max_seqlen_in_batch,
dropout_p=dropout_rate,
softmax_scale=None,
causal=True,
)
attn_output_unpad = attn_output_unpad.to(input_dtype)
attn_output_unpad = attn_output_unpad.reshape(total_nnz, 1, self.hidden_size_per_tp).contiguous()
# sequence parallel reduce_scatter is performed inside RowColumnParallel if enabled
# Here we need to repad
if self.megatron_config.sequence_parallel:
attn_output_unpad = F.pad(attn_output_unpad, pad=(0, 0, 0, 0, 0, sequence_parallel_pad))
attn_output_unpad = self.o_proj(attn_output_unpad)[0]
return attn_output_unpad
================================================
FILE: verl/models/qwen2/megatron/layers/parallel_decoder.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import Qwen2Config
from megatron.core import ModelParallelConfig
from .parallel_attention import ParallelQwen2Attention, ParallelQwen2AttentionRmPad
from .parallel_mlp import ParallelQwen2MLP
from .parallel_rmsnorm import ParallelQwen2RMSNorm
class ParallelQwen2DecoderLayer(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = ParallelQwen2Attention(config=config, megatron_config=megatron_config)
self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Note: sequence parallel is hidden inside ColumnParallelLinear
# reduce scatter is hidden inside RowParallelLinear
# Self Attention
hidden_states = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
# TODO: add sequence parallel operator all_gather here
hidden_states = self.mlp(hidden_states)
# TODO: add sequence parallel operator reduce_scatter here
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
class ParallelQwen2DecoderLayerRmPad(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.hidden_size = config.hidden_size
self.self_attn = ParallelQwen2AttentionRmPad(config=config, megatron_config=megatron_config)
self.mlp = ParallelQwen2MLP(config, megatron_config=megatron_config)
self.input_layernorm = ParallelQwen2RMSNorm(config, megatron_config)
self.post_attention_layernorm = ParallelQwen2RMSNorm(config, megatron_config)
def forward(
self,
hidden_states: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
residual = hidden_states # (total_nnz // sp, 1, hidden_size)
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
# (total_nnz // sp, 1, hidden_size) -> all-gather (total_nnz, 1, hidden_size)
# -> col + row -> reduce-scatter -> (total_nnz // sp, 1, hidden_size)
hidden_states = self.self_attn(hidden_states=hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = residual + hidden_states
# Fully Connected
# shape changes same as attn
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = hidden_states
return outputs
================================================
FILE: verl/models/qwen2/megatron/layers/parallel_linear.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/linear.py
from typing import Optional, Tuple
from megatron.core import tensor_parallel
class QKVParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(self,
input_size,
num_heads,
num_key_value_heads,
head_dim,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs):
# Keep the construction parameters; head counts here describe the full (unsharded) projection
self.input_size = input_size
self.q_output_size = num_heads * head_dim
self.kv_output_size = num_key_value_heads * head_dim
self.head_dim = head_dim
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
input_size = self.input_size
output_size = (num_heads + 2 * num_key_value_heads) * self.head_dim
super().__init__(input_size=input_size,
output_size=output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs)
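# Sizing sketch (illustrative config, not from the source): with num_heads=12,
# num_key_value_heads=2 and head_dim=128, the fused output_size is
# (12 + 2 * 2) * 128 = 2048 rows, column-sharded across TP ranks.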
class MergedColumnParallelLinear(tensor_parallel.ColumnParallelLinear):
def __init__(self,
input_size,
gate_output_size,
up_output_size,
*,
bias=True,
gather_output=True,
skip_bias_add=False,
**kwargs):
# Keep the construction parameters for the fused gate/up projection
self.input_size = input_size
self.output_size = gate_output_size + up_output_size
self.gather_output = gather_output
self.skip_bias_add = skip_bias_add
super().__init__(input_size=self.input_size,
output_size=self.output_size,
bias=bias,
gather_output=gather_output,
skip_bias_add=skip_bias_add,
**kwargs)
================================================
FILE: verl/models/qwen2/megatron/layers/parallel_mlp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from megatron.core import parallel_state as mpu
from megatron.core import tensor_parallel
from megatron.core import ModelParallelConfig
from torch import nn
from transformers.activations import ACT2FN
from verl.models.qwen2.megatron.layers.parallel_linear import MergedColumnParallelLinear
from verl.utils.megatron import tensor_parallel as tp_utils
class ParallelQwen2MLP(nn.Module):
def __init__(self, config, megatron_config: ModelParallelConfig = None) -> None:
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
# The weight is only [hidden_size, intermediate_size // model_parallel_world_size]
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
row_kwargs = tp_utils.get_default_kwargs_for_row_parallel_linear()
if megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
assert row_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(row_kwargs, megatron_config)
tp_utils.update_kwargs_with_config(column_kwargs, megatron_config)
tp_size = mpu.get_tensor_model_parallel_world_size()
self.gate_up_proj = MergedColumnParallelLinear(
input_size=self.hidden_size,
gate_output_size=self.intermediate_size,
up_output_size=self.intermediate_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs,
)
self.gate_size = self.intermediate_size // tp_size
self.down_proj = tensor_parallel.RowParallelLinear(input_size=self.intermediate_size,
output_size=self.hidden_size,
bias=False,
input_is_parallel=True,
skip_bias_add=False,
**row_kwargs)
self.act_fn = ACT2FN[config.hidden_act]
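# forward computes the SwiGLU MLP: down_proj(act_fn(gate) * up), where gate and
# up come out of a single fused column-parallel matmul and are split locally.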
def forward(self, x):
gate_up = self.gate_up_proj(x)[0]
gate, up = gate_up.split(self.gate_size, dim=-1)
return self.down_proj(self.act_fn(gate) * up)[0]
================================================
FILE: verl/models/qwen2/megatron/layers/parallel_rmsnorm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import torch
from megatron.core import ModelParallelConfig
from torch import nn
from transformers import Qwen2Config
from apex.normalization.fused_layer_norm import fused_rms_norm_affine
from verl.utils.megatron import sequence_parallel as sp_utils
class ParallelQwen2RMSNorm(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
"""
Qwen2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
if isinstance(config.hidden_size, numbers.Integral):
normalized_shape = (config.hidden_size,)
self.normalized_shape = torch.Size(normalized_shape)
self.weight = nn.Parameter(torch.ones(self.normalized_shape))
self.variance_epsilon = config.rms_norm_eps
if megatron_config.sequence_parallel:
sp_utils.mark_parameter_as_sequence_parallel(self.weight)
def forward(self, hidden_states):
return fused_rms_norm_affine(input=hidden_states,
weight=self.weight,
normalized_shape=self.normalized_shape,
eps=self.variance_epsilon,
memory_efficient=True)
================================================
FILE: verl/models/qwen2/megatron/modeling_qwen2_megatron.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch Qwen2 model."""
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from megatron.core import tensor_parallel, parallel_state
from megatron.core import ModelParallelConfig
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.qwen2.configuration_qwen2 import Qwen2Config
from transformers.models.qwen2.modeling_qwen2 import CausalLMOutputWithPast
from verl.utils.megatron import sequence_parallel as sp_utils
from verl.utils.megatron import tensor_parallel as tp_utils
from .layers import ParallelQwen2DecoderLayer, ParallelQwen2RMSNorm, ParallelQwen2DecoderLayerRmPad
"""
TODO:
1. Add weight initialization. Here we need to be careful on TP weight init.
2. Add sequence parallel
3. Load checkpoint from Qwen2 pretrained checkpoint
"""
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
    Make causal mask used for auto-regressive (uni-directional) self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
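# Illustrative example (a sketch, not part of the original file): for
# attention_mask = torch.tensor([[1, 1, 0]]) and dtype=torch.float32,
# _expand_mask returns shape (1, 1, 3, 3) where every row is
# [0.0, 0.0, torch.finfo(torch.float32).min], so adding it to attention scores
# suppresses the padded third position. _make_causal_mask((1, 3), dtype, device)
# instead yields a lower-triangular pattern of 0.0 with torch.finfo(dtype).min
# above the diagonal; the two are summed in _prepare_decoder_attention_mask below.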
class ParallelQwen2Model(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
        self.megatron_config = megatron_config
        if megatron_config is not None:
            assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
            tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
self.layers = nn.ModuleList(
[ParallelQwen2DecoderLayer(config, megatron_config) for _ in range(config.num_hidden_layers)])
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype,
tgt_len=input_shape[-1]).to(inputs_embeds.device)
combined_attention_mask = (expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask +
combined_attention_mask)
return combined_attention_mask
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
input_ids: input ids. shape (batch_size, seq_length)
attention_mask: attention_mask. shape (batch_size, seq_length)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
batch_size, seq_length = input_ids.shape
inputs_embeds = self.embed_tokens(input_ids)
        # prepare the combined causal + padding attention mask
attention_mask = self._prepare_decoder_attention_mask(attention_mask, (batch_size, seq_length), inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelQwen2ForCausalLM(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.model = ParallelQwen2Model(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
        self.megatron_config = megatron_config
        if megatron_config is not None:
            assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
            tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=config.hidden_size,
output_size=config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
)
hidden_states = outputs
logits = self.lm_head(hidden_states)[0]
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits)
logits = logits.float()
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
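# Usage sketch (illustrative only; assumes Megatron parallel state and a valid
# ModelParallelConfig are already set up, and that the referenced checkpoint
# name is available):
#
#   config = Qwen2Config.from_pretrained("Qwen/Qwen2-1.5B")
#   model = ParallelQwen2ForCausalLM(config, megatron_config).cuda()
#   out = model(input_ids=input_ids, attention_mask=attention_mask,
#               position_ids=position_ids)
#   out.logits  # (batch_size, seq_length, vocab_size), gathered over tp ranks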
from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
class ParallelQwen2ModelRmPad(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
self.megatron_config = megatron_config
if megatron_config is not None:
assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
self.layers = nn.ModuleList(
[ParallelQwen2DecoderLayerRmPad(config, megatron_config) for _ in range(config.num_hidden_layers)])
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
def forward(self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
                cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = layer_outputs
hidden_states = self.norm(hidden_states)
return hidden_states
class ParallelQwen2ForCausalLMRmPad(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.model = ParallelQwen2ModelRmPad(config, megatron_config=megatron_config)
self.vocab_size = config.vocab_size
self._init_head()
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=self.config.hidden_size,
output_size=self.config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
**column_kwargs)
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
logits = self.lm_head(hidden_states)[0]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
logits = tensor_parallel.gather_from_tensor_model_parallel_region(logits) # (total_nnz_padded, 1, vocab_size)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(input_ids.unsqueeze(dim=-1),
attention_mask) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer; the size of the performance gap is unclear
if self.megatron_config.sequence_parallel:
input_ids = sp_utils.pad_to_sequence_parallel(input_ids)
input_ids = input_ids.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(input_ids=input_ids,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = outputs
logits = self._forward_head(hidden_states)
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
            total_nnz = cu_seqlens[-1]
            logits = logits[:total_nnz]  # (total_nnz, 1, vocab_size)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension
# add removed padding back
logits = pad_input(logits, indices, batch_size,
seqlen=sequence_length) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
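# Remove-padding round trip (an illustrative sketch of the unpad/pad pair used
# in forward above; runnable with flash_attn installed, names are local):
#
#   x = torch.randn(2, 4, 8)                           # (bsz, seqlen, hidden)
#   mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])  # 3 + 2 = 5 real tokens
#   x_packed, indices, cu_seqlens, max_len, *_ = unpad_input(x, mask)
#   # x_packed: (5, 8), cu_seqlens: [0, 3, 5], max_len: 3
#   x_restored = pad_input(x_packed, indices, 2, 4)    # zeros at masked slots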
class ParallelQwen2ForValueRmPad(ParallelQwen2ForCausalLMRmPad):
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=self.config.hidden_size, out_features=1, bias=False)
        # the value head consumes sequence-parallel activations, so mark its weight as sequence parallel
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output = super().forward(input_ids, attention_mask, position_ids)
output.logits = torch.squeeze(output.logits, dim=-1)
return output
"""
Support pipeline parallelism
"""
class ParallelQwen2ModelRmPadPP(nn.Module):
"""
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Qwen2DecoderLayer`]
This model definition supports pipeline parallelism. To support pp and vpp,
- This model only contains layer in this pp stage and vpp chunk
- When calling get_model in Megatron, this rank will instantiate all the vpp chunks in this pp.
Args:
config: Qwen2Config
"""
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process):
super().__init__()
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
self.megatron_config = megatron_config
embedding_kwargs = tp_utils.get_default_kwargs_for_parallel_embedding()
if megatron_config is not None:
assert embedding_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(embedding_kwargs, self.megatron_config)
if pre_process:
self.embed_tokens = tensor_parallel.VocabParallelEmbedding(num_embeddings=config.vocab_size,
embedding_dim=config.hidden_size,
**embedding_kwargs)
else:
self.embed_tokens = None
# pp_rank = megatron_config.pipeline_model_parallel_rank
pp_size = megatron_config.pipeline_model_parallel_size
self.num_layer_per_pp = config.num_hidden_layers // pp_size
vpp_size = megatron_config.virtual_pipeline_model_parallel_size
if vpp_size is not None:
self.num_layer_vpp_chunk = self.num_layer_per_pp // vpp_size
self.num_layer_this_model = self.num_layer_vpp_chunk
# vpp_rank = megatron_config.virtual_pipeline_model_parallel_rank
# self.offset = vpp_rank * (
# config.num_hidden_layers // megatron_config.virtual_pipeline_model_parallel_size) + \
# (megatron_config.pipeline_model_parallel_rank * self.num_layer_vpp_chunk)
else:
self.num_layer_this_model = self.num_layer_per_pp
# self.offset = pp_rank * self.num_layer_per_pp
layers = []
for i in range(self.num_layer_this_model):
layer = ParallelQwen2DecoderLayerRmPad(config, megatron_config)
# setattr(layer, 'hidden_layer_index', self.offset + i)
layers.append(layer)
self.layers = nn.ModuleList(layers)
if post_process:
self.norm = ParallelQwen2RMSNorm(config, megatron_config)
else:
self.norm = None
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
self.input_tensor = input_tensor
def forward(self,
input_ids: torch.Tensor,
position_ids: Optional[torch.LongTensor] = None,
sequence_length: int = None,
indices: torch.Tensor = None,
                cu_seqlens: torch.Tensor = None,
max_seqlen_in_batch: int = None) -> Union[Tuple, BaseModelOutputWithPast]:
"""
Args:
            input_ids: input ids. shape (1, total_nnz)
position_ids: position ids. shape (batch_size, seq_length)
Returns:
"""
if self.pre_process:
inputs_embeds = self.embed_tokens(input_ids) # (1, total_nnz) -> (1, total_nnz, hidden_size)
            # vocab parallel embedding will not do the sequence parallel reduce-scatter in open-source megatron,
            # so we handle the scatter to the sequence parallel region here:
# (1, total_nnz, hidden_size) -> (total_nnz, 1, hidden_size) -> (total_nnz // sp, 1, hidden_size)
inputs_embeds = inputs_embeds.transpose(0, 1)
if self.megatron_config.sequence_parallel:
inputs_embeds = tensor_parallel.scatter_to_sequence_parallel_region(inputs_embeds)
hidden_states = inputs_embeds
else:
            # for non-first pp stages, the input tensor is set by Megatron via set_input_tensor
hidden_states = self.input_tensor
for idx, decoder_layer in enumerate(self.layers):
layer_outputs = decoder_layer(hidden_states,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
hidden_states = layer_outputs
if self.post_process:
hidden_states = self.norm(hidden_states)
return hidden_states
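# Worked example (a sketch) of the layer partitioning above: with
# config.num_hidden_layers = 32, pipeline_model_parallel_size = 4 and
# virtual_pipeline_model_parallel_size = 2, each pp rank owns 32 // 4 = 8
# layers split into 2 vpp chunks of 8 // 2 = 4 layers, so each
# ParallelQwen2ModelRmPadPP instance builds num_layer_this_model = 4 layers.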
class ParallelQwen2ForCausalLMRmPadPP(nn.Module):
def __init__(self, config: Qwen2Config, megatron_config: ModelParallelConfig, pre_process, post_process,
share_embeddings_and_output_weights):
super().__init__()
self.config = config
self.megatron_config = megatron_config
self.model = ParallelQwen2ModelRmPadPP(config,
megatron_config=megatron_config,
pre_process=pre_process,
post_process=post_process)
self.share_embeddings_and_output_weights = share_embeddings_and_output_weights
self.vocab_size = config.vocab_size
self.pre_process = pre_process
self.post_process = post_process
if post_process:
self._init_head()
if pre_process or post_process:
self.setup_embeddings_and_output_layer()
def set_input_tensor(self, input_tensor):
"""Set input tensor to be used instead of forward()'s input.
When doing pipeline parallelism the input from the previous
stage comes from communication, not from the input, so the
model's forward_step_func won't have it. This function is thus
used by internal code to bypass the input provided by the
forward_step_func"""
assert len(input_tensor) == 1
self.model.set_input_tensor(input_tensor[0])
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = tensor_parallel.ColumnParallelLinear(input_size=self.config.hidden_size,
output_size=self.config.vocab_size,
bias=False,
gather_output=False,
skip_bias_add=False,
skip_weight_param_allocation=self.pre_process and
self.share_embeddings_and_output_weights,
**column_kwargs)
def setup_embeddings_and_output_layer(self) -> None:
"""Sets up embedding layer in first stage and output layer in last stage.
        This function initializes word embeddings in the final stage when we are
using pipeline parallelism and sharing word embeddings, and sets up param
attributes on the embedding and output layers.
"""
# Set `is_embedding_or_output_parameter` attribute.
if self.pre_process:
self.model.embed_tokens.weight.is_embedding_or_output_parameter = True
if self.post_process and self.lm_head.weight is not None:
self.lm_head.weight.is_embedding_or_output_parameter = True
if not self.share_embeddings_and_output_weights:
return
if parallel_state.get_pipeline_model_parallel_world_size() == 1:
# Zero out wgrad if sharing embeddings between two layers on same
# pipeline stage to make sure grad accumulation into main_grad is
# correct and does not include garbage values (e.g., from torch.empty).
self.shared_embedding_or_output_weight().zero_out_wgrad = True
return
if parallel_state.is_pipeline_first_stage() and self.pre_process and not self.post_process:
self.shared_embedding_or_output_weight().shared_embedding = True
if self.post_process and not self.pre_process:
assert not parallel_state.is_pipeline_first_stage()
# set word_embeddings weights to 0 here, then copy first
# stage's weights using all_reduce below.
self.lm_head.weight.data.fill_(0)
self.lm_head.weight.shared = True
self.lm_head.weight.shared_embedding = True
if torch.distributed.is_initialized():
if parallel_state.is_rank_in_embedding_group():
weight = self.shared_embedding_or_output_weight()
weight.data = weight.data.cuda()
torch.distributed.all_reduce(weight.data, group=parallel_state.get_embedding_group())
def shared_embedding_or_output_weight(self) -> torch.Tensor:
if self.pre_process:
return self.model.embed_tokens.weight
elif self.post_process:
return self.lm_head.weight
return None
def _forward_head(self, hidden_states):
# all_gather from sequence parallel region is performed inside lm_head
# print(f'logits shape before forward_head: {hidden_states.shape}, vocab_size = {self.config.vocab_size}') # [4, 32, 4096]
output_weight = None
if self.share_embeddings_and_output_weights:
output_weight = self.shared_embedding_or_output_weight()
logits = self.lm_head(hidden_states, weight=output_weight)[0]
# print(f'logits shape after forward_head: {logits.shape}') # [8, 32, 8]
logits = logits.float() # (total_nnz_padded, 1, vocab_size // tp)
return logits
def forward(
self,
# original input
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
r"""
Args:
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Returns:
```"""
        # Note that input_ids, attention_mask and position_ids should be passed to every pp stage.
        # The first pp stage consumes input_ids; later stages consume hidden_states set via set_input_tensor inside self.model.
batch_size, sequence_length = input_ids.shape
# remove padding here
input_ids_rmpad, indices, cu_seqlens, max_seqlen_in_batch, *_ = unpad_input(input_ids.unsqueeze(dim=-1),
attention_mask) # (total_nnz, 1)
# pad input_ids to multiple of tp for all tp ranks
        # TODO: for better performance, the sp padding should be removed at each layer; the size of the performance gap is unclear
if self.megatron_config.sequence_parallel:
input_ids_rmpad = sp_utils.pad_to_sequence_parallel(input_ids_rmpad)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz+pad)
outputs = self.model(input_ids=input_ids_rmpad,
position_ids=position_ids,
sequence_length=sequence_length,
indices=indices,
cu_seqlens=cu_seqlens,
max_seqlen_in_batch=max_seqlen_in_batch)
if self.post_process:
hidden_states = outputs
logits = self._forward_head(hidden_states)
logits = torch.squeeze(logits, dim=1) # remove the artificial batch dimension # torch.Size([8, 32, 16])
# remove padding from sequence parallel
if self.megatron_config.sequence_parallel:
                total_nnz = cu_seqlens[-1]
                logits = logits[:total_nnz]  # (total_nnz, vocab_size)
# add removed padding back. If input is already rmpad, we let the caller pad_input
logits = pad_input(logits, indices, batch_size,
seqlen=sequence_length) # (batch_size, sequence_length, vocab_size)
return CausalLMOutputWithPast(
loss=None,
logits=logits,
past_key_values=None,
hidden_states=None,
attentions=None,
)
else:
return outputs
class ParallelQwen2ForValueRmPadPP(ParallelQwen2ForCausalLMRmPadPP):
def _init_head(self):
column_kwargs = tp_utils.get_default_kwargs_for_column_parallel_linear()
if self.megatron_config is not None:
assert column_kwargs.get('config', False), 'must have ModelParallelConfig'
tp_utils.update_kwargs_with_config(column_kwargs, self.megatron_config)
self.lm_head = nn.Linear(in_features=self.config.hidden_size, out_features=1, bias=False)
        # the value head consumes sequence-parallel activations, so mark its weight as sequence parallel
sp_utils.mark_parameter_as_sequence_parallel(self.lm_head.weight)
def _forward_head(self, hidden_states):
logits = self.lm_head(hidden_states) # (total_nnz_padded // tp, 1, 1)
logits = logits.float()
if self.megatron_config.sequence_parallel:
logits = tensor_parallel.gather_from_sequence_parallel_region(logits, tensor_parallel_output_grad=False)
return logits
def forward(
self,
*,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
output = super().forward(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
if self.post_process:
output.logits = torch.squeeze(output.logits, dim=-1)
return output
else:
return output
================================================
FILE: verl/models/registry.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import List, Optional, Type
import torch.nn as nn
# Supported models using HF Rmpad
# TODO(sgm): HF may support more models than listed here; we should add more after testing
_MODELS_SUPPORT_RMPAD = {'llama', 'mistral', 'gemma', 'qwen2', 'qwen2_vl', 'qwen2_5_vl'}
def check_model_support_rmpad(model_type: str):
assert isinstance(model_type, str)
    if model_type not in _MODELS_SUPPORT_RMPAD:
        raise ValueError(f"Model architecture {model_type} is not supported for now. "
                         f"RMPad supported architectures: {_MODELS_SUPPORT_RMPAD}. "
                         f"Please set `use_remove_padding=False` in the model config.")
if model_type in ("qwen2_vl", "qwen2_5_vl"): # patch remove padding for qwen2vl mrope
from verl.models.transformers.qwen2_vl import ulysses_flash_attn_forward
from transformers.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLFlashAttention2
from transformers.models.qwen2_5_vl.modeling_qwen2_5_vl import Qwen2_5_VLFlashAttention2
Qwen2VLFlashAttention2.forward = ulysses_flash_attn_forward
Qwen2_5_VLFlashAttention2.forward = ulysses_flash_attn_forward
print("Qwen2vl patch applied!")
# Supported models in Megatron-LM
# Architecture -> (module, class).
_MODELS = {
"LlamaForCausalLM":
("llama", ("ParallelLlamaForCausalLMRmPadPP", "ParallelLlamaForValueRmPadPP", "ParallelLlamaForCausalLMRmPad")),
"Qwen2ForCausalLM":
("qwen2", ("ParallelQwen2ForCausalLMRmPadPP", "ParallelQwen2ForValueRmPadPP", "ParallelQwen2ForCausalLMRmPad")),
"MistralForCausalLM": ("mistral", ("ParallelMistralForCausalLMRmPadPP", "ParallelMistralForValueRmPadPP",
"ParallelMistralForCausalLMRmPad"))
}
# return model class
class ModelRegistry:
@staticmethod
def load_model_cls(model_arch: str, value=False) -> Optional[Type[nn.Module]]:
if model_arch not in _MODELS:
return None
megatron = "megatron"
module_name, model_cls_name = _MODELS[model_arch]
        if not value:  # actor/ref
            model_cls_name = model_cls_name[0]
        else:  # critic/rm
            model_cls_name = model_cls_name[1]
module = importlib.import_module(f"verl.models.{module_name}.{megatron}.modeling_{module_name}_megatron")
return getattr(module, model_cls_name, None)
@staticmethod
def get_supported_archs() -> List[str]:
return list(_MODELS.keys())
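# Usage sketch (illustrative; assumes the megatron modeling modules import
# cleanly in the current environment):
#
#   actor_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=False)
#   # -> ParallelQwen2ForCausalLMRmPadPP
#   critic_cls = ModelRegistry.load_model_cls("Qwen2ForCausalLM", value=True)
#   # -> ParallelQwen2ForValueRmPadPP
#   ModelRegistry.get_supported_archs()
#   # -> ['LlamaForCausalLM', 'Qwen2ForCausalLM', 'MistralForCausalLM']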
================================================
FILE: verl/models/transformers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/models/transformers/llama.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Optional, Tuple, Callable
import sys
if sys.version_info >= (3, 11):
from typing import Unpack
else:
from typing_extensions import Unpack
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from transformers.cache_utils import Cache
from transformers.utils import logging
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from verl.utils.ulysses import gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size
logger = logging.get_logger(__name__)
def llama_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.47.1 to support Ulysses sequence parallelism.
NOTE: This function is used for transformers versions in the range [4.45.0, 4.47.1].
"""
output_attentions = False
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
    # Flash attention requires the input to have the shape
    # batch_size x seq_length x num_heads x head_dim,
    # therefore we just need to keep the original shape
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
# trade off: repeat first and then all to all
# key_states = repeat_kv(key_states, self.num_key_value_groups)
# value_states = repeat_kv(value_states, self.num_key_value_groups)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2) # full seq length
if position_embeddings is None:
logger.warning_once(
"The attention layers in this model are transitioning from computing the RoPE embeddings internally "
"through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
"`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
"removed and `position_embeddings` will be mandatory.")
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
# to be able to avoid many of these transpose/reshape/view.
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
dropout_rate = self.attention_dropout if self.training else 0.0
    # In PEFT, the layer norms are usually cast to float32 for training stability,
    # so the input hidden states can get silently cast to float32. Hence, we
    # cast them back to the correct dtype just to be sure everything works as expected.
    # This might slow down training & inference, so it is recommended not to cast
    # the LayerNorms to fp32. (LlamaRMSNorm handles it correctly)
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be because"
                f" you have upcast embedding or layer norm layers to float32. We will cast the input back to"
                f" {target_dtype}.")
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=getattr(self, "sliding_window", None),
use_top_left_mask=self._flash_attn_uses_top_left_mask,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
def llama_attn_forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.
NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.49.0.
"""
from transformers.models.llama.modeling_llama import eager_attention_forward
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
bsz, q_len, _ = hidden_states.shape
query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
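# Shape walk-through for the Ulysses all-to-alls above (an illustrative sketch):
# with ulysses_sp_size = 4, 32 query heads and a full sequence of 8192 tokens,
# each rank holds q_len = 8192 / 4 = 2048 local tokens:
#
#   after view/transpose:       (bsz, 32, 2048, head_dim)
#   gather_seq_scatter_heads -> (bsz,  8, 8192, head_dim)  # full seq, 1/4 heads
#   attention + reshape      -> (bsz, 8192, 8, head_dim)
#   gather_heads_scatter_seq -> (bsz, 2048, 32, head_dim)  # local seq restored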
================================================
FILE: verl/models/transformers/monkey_patch.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Apply monkey-patch function to models
"""
def apply_monkey_patch_to_llama():
if is_transformers_version_in_range("4.45.0", "4.47.1"):
from transformers.models.llama.modeling_llama import LlamaFlashAttention2
from verl.models.transformers.llama import llama_flash_attn_forward
LlamaFlashAttention2.forward = llama_flash_attn_forward
elif is_transformers_version_in_range("4.48.0", "4.49.0"):
from transformers.models.llama.modeling_llama import LlamaAttention
from verl.models.transformers.llama import llama_attn_forward
LlamaAttention.forward = llama_attn_forward
def apply_monkey_patch_to_qwen2():
if is_transformers_version_in_range("4.45.0", "4.47.1"):
from transformers.models.qwen2.modeling_qwen2 import Qwen2FlashAttention2
from verl.models.transformers.qwen2 import qwen2_flash_attn_forward
Qwen2FlashAttention2.forward = qwen2_flash_attn_forward
elif is_transformers_version_in_range("4.48.0", "4.49.0"):
from transformers.models.qwen2.modeling_qwen2 import Qwen2Attention
from verl.models.transformers.qwen2 import qwen2_attn_forward
Qwen2Attention.forward = qwen2_attn_forward
_PATCH_NAME_TO_FUNC = {
'llama': apply_monkey_patch_to_llama,
'qwen2': apply_monkey_patch_to_qwen2,
}
from transformers import PretrainedConfig
def apply_monkey_patch(config: PretrainedConfig, verbose=True):
if not is_transformers_version_in_range("4.45.0", "4.49.0"):
raise AssertionError("The installed `transformers` version doesn't support ulysses patch. "
"Please install a version between 4.45.0 and 4.49.0 to use this ulysses feature.")
success_apply_monkey_patch = False
if config.model_type in _PATCH_NAME_TO_FUNC:
_PATCH_NAME_TO_FUNC[config.model_type]()
success_apply_monkey_patch = True
if success_apply_monkey_patch and verbose:
print(f'Applying monkey patch to model {config.model_type}')
elif not success_apply_monkey_patch:
        raise NotImplementedError(f'Ulysses for model {config.model_type} is not implemented, '
                                  f'please set `ulysses_sequence_parallel_size=1`')
return success_apply_monkey_patch
from functools import lru_cache
from packaging import version
import importlib.metadata
@lru_cache()
def is_transformers_version_in_range(min_version: str, max_version: str) -> bool:
try:
# Get the installed version of the transformers library
transformers_version = importlib.metadata.version("transformers")
except importlib.metadata.PackageNotFoundError:
raise ModuleNotFoundError("The `transformers` package is not installed.")
# Check if the version is within the specified range
return version.parse(min_version) <= version.parse(transformers_version) <= version.parse(max_version)
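# Usage sketch (illustrative):
#
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained("Qwen/Qwen2-1.5B")
#   apply_monkey_patch(config)  # patches Qwen2 attention for Ulysses SP
#   # Unsupported model types raise NotImplementedError; a transformers
#   # version outside [4.45.0, 4.49.0] raises AssertionError.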
================================================
FILE: verl/models/transformers/qwen2.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Optional, Tuple, Callable
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb, repeat_kv
from transformers.cache_utils import Cache
from transformers.utils import logging
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from transformers.processing_utils import Unpack
from verl.utils.ulysses import gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size
logger = logging.get_logger(__name__)
def qwen2_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_value: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
):
"""
Adapted from transformers 4.47.1 to support Ulysses sequence parallelism.
NOTE: This function is only tested on transformers versions between 4.45.0 and 4.47.1.
"""
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2) # full seq length
if position_embeddings is None:
logger.warning_once(
"The attention layers in this model are transitioning from computing the RoPE embeddings internally "
"through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
"`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
"removed and `position_embeddings` will be mandatory.")
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
# repeat k/v heads if n_kv_heads < n_heads
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
dropout_rate = 0.0 if not self.training else self.attention_dropout
    # In PEFT, the layer norms are usually cast to float32 for training stability,
    # so the input hidden states can get silently cast to float32. Hence, we
    # cast them back to float16 just to be sure everything works as expected.
input_dtype = query_states.dtype
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = torch.get_autocast_gpu_dtype()
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32; this might be because"
                f" you have upcast embedding or layer norm layers to float32. We will cast the input back to"
                f" {target_dtype}.")
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
    # Reshape to the expected shape for Flash Attention
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if (self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and
self.layer_idx >= self.config.max_window_layers):
sliding_window = self.config.sliding_window
else:
sliding_window = None
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=sliding_window,
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
# use full_q_len to reshape
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights, past_key_value
def qwen2_attn_forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""
Adapted from transformers 4.49.0 to support Ulysses sequence parallelism for transformers >= 4.48.0.
NOTE: This function has been tested only on transformers versions between 4.48.0 and 4.49.0.
"""
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
bsz, q_len, _ = hidden_states.shape
hidden_shape = (bsz, q_len, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
########## AlltoAll for Ulysses ##########
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
# (bsz, n_head, seq_len/n, head_dim) -> (bsz, n_head/n, seq_len, head_dim)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
full_q_len = query_states.size(2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_value is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
sliding_window = None
if (self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and
self.layer_idx >= self.config.max_window_layers):
sliding_window = self.config.sliding_window
from transformers.models.qwen2.modeling_qwen2 import eager_attention_forward
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
logger.warning_once(
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
else:
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=sliding_window, # main diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(bsz, full_q_len, -1, self.head_dim).contiguous()
########## AlltoAll for Ulysses ##########
if ulysses_sp_size > 1:
# (bsz, seq_len, n_head/n, head_dim) -> (bsz, seq_len/n, n_head, head_dim)
attn_output = gather_heads_scatter_seq(attn_output, seq_dim=1, head_dim=2)
attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
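# Worked example (a sketch) of the sliding-window switch above, the main
# difference from Llama: with use_sliding_window=True, sliding_window=4096 and
# max_window_layers=28, layers with layer_idx >= 28 pass sliding_window=4096
# to the attention kernel, while earlier layers attend over the full context.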
================================================
FILE: verl/models/transformers/qwen2_vl.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import inspect
import torch
import os
from transformers.utils import is_flash_attn_greater_or_equal
from transformers.modeling_flash_attention_utils import _flash_attention_forward
from verl.utils.ulysses import gather_heads_scatter_seq, gather_seq_scatter_heads, get_ulysses_sequence_parallel_world_size
try:
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
except ImportError:
    flash_attn_func, flash_attn_varlen_func = None, None
    _flash_supports_window_size = False
def get_rope_index(
processor,
input_ids: torch.Tensor,
image_grid_thw: Optional[torch.Tensor] = None,
video_grid_thw: Optional[torch.Tensor] = None,
second_per_grid_ts: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
    Gets the position ids for Qwen2-VL; they should be generated before sharding the sequence.
The batch dim has been removed and the input_ids should be a 1D tensor representing a single example.
https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/qwen2_5_vl/modeling_qwen2_5_vl.py#L1546
"""
spatial_merge_size = processor.image_processor.merge_size
tokens_per_second = 2
image_token_id = processor.tokenizer.convert_tokens_to_ids("<|image_pad|>")
video_token_id = processor.tokenizer.convert_tokens_to_ids("<|video_pad|>")
vision_start_token_id = processor.tokenizer.convert_tokens_to_ids("<|vision_start|>")
if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
position_ids = torch.ones(3, input_ids.size(0), dtype=input_ids.dtype, device=input_ids.device) # (3, seqlen)
image_index, video_index = 0, 0
input_ids = input_ids[attention_mask == 1]
image_nums, video_nums = 0, 0
vision_start_indices = torch.argwhere(input_ids == vision_start_token_id)
vision_tokens = input_ids[vision_start_indices + 1]
image_nums = (vision_tokens == image_token_id).sum()
video_nums = (vision_tokens == video_token_id).sum()
input_tokens = input_ids.tolist()
llm_pos_ids_list: list = []
st = 0
remain_images, remain_videos = image_nums, video_nums
for _ in range(image_nums + video_nums):
if image_token_id in input_tokens and remain_images > 0:
ed_image = input_tokens.index(image_token_id, st)
else:
ed_image = len(input_tokens) + 1
if video_token_id in input_tokens and remain_videos > 0:
ed_video = input_tokens.index(video_token_id, st)
else:
ed_video = len(input_tokens) + 1
if ed_image < ed_video:
t, h, w = (
image_grid_thw[image_index][0],
image_grid_thw[image_index][1],
image_grid_thw[image_index][2],
)
second_per_grid_t = 0
image_index += 1
remain_images -= 1
ed = ed_image
else:
t, h, w = (
video_grid_thw[video_index][0],
video_grid_thw[video_index][1],
video_grid_thw[video_index][2],
)
if second_per_grid_ts is not None:
second_per_grid_t = second_per_grid_ts[video_index]
else:
second_per_grid_t = 1.0
video_index += 1
remain_videos -= 1
ed = ed_video
llm_grid_t, llm_grid_h, llm_grid_w = (
t.item(),
h.item() // spatial_merge_size,
w.item() // spatial_merge_size,
)
text_len = ed - st
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
t_index = torch.arange(llm_grid_t).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w)
t_index = (t_index * second_per_grid_t * tokens_per_second).long().flatten()
h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(llm_grid_t, -1, llm_grid_w).flatten()
w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(llm_grid_t, llm_grid_h, -1).flatten()
llm_pos_ids_list.append(torch.stack([t_index, h_index, w_index]) + text_len + st_idx)
st = ed + llm_grid_t * llm_grid_h * llm_grid_w
if st < len(input_tokens):
st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
text_len = len(input_tokens) - st
llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
position_ids[..., attention_mask == 1] = llm_positions.to(position_ids.device)
else:
if attention_mask is not None:
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
position_ids = position_ids.unsqueeze(0).expand(3, -1).to(input_ids.device)
else:
position_ids = torch.arange(input_ids.shape[1], device=input_ids.device).view(1, -1).expand(3, -1)
return position_ids
def prepare_fa2_from_position_ids(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
position_ids: torch.Tensor):
query = query.view(-1, query.size(-2), query.size(-1))
key = key.view(-1, key.size(-2), key.size(-1))
value = value.view(-1, value.size(-2), value.size(-1))
position_ids = position_ids.flatten()
indices_q = torch.arange(position_ids.size(0), device=position_ids.device, dtype=torch.int32)
cu_seqlens = torch.cat((
indices_q[position_ids == 0],
torch.tensor(position_ids.size(), device=position_ids.device, dtype=torch.int32),
))
max_length = cu_seqlens.diff().max() # use cu_seqlens to infer max_length for qwen2vl mrope
return (query, key, value, indices_q, (cu_seqlens, cu_seqlens), (max_length, max_length))
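# Worked example (a sketch): two packed sequences of lengths 3 and 2 give
# position_ids = [0, 1, 2, 0, 1]. Zeros mark sequence starts, so
# cu_seqlens = [0, 3, 5] and max_length = cu_seqlens.diff().max() = 3,
# exactly the varlen metadata flash_attn_varlen_func expects.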
def flash_attention_forward(
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
attention_mask: torch.Tensor,
query_length: int,
is_causal: bool = True,
position_ids: Optional[torch.Tensor] = None,
sliding_window: Optional[int] = None,
use_top_left_mask: bool = False,
deterministic: Optional[bool] = None,
**kwargs,
):
"""
Patches flash attention forward to handle 3D position ids in mrope. (3, batch_size, seq_length)
"""
if not use_top_left_mask:
causal = is_causal
else:
causal = is_causal and query_length != 1
# Assuming 4D tensors, key_states.shape[1] is the key/value sequence length (source length).
use_sliding_windows = (_flash_supports_window_size and sliding_window is not None and
key_states.shape[1] > sliding_window)
flash_kwargs = {"window_size": (sliding_window, sliding_window)} if use_sliding_windows else {}
if is_flash_attn_greater_or_equal("2.4.1"):
if deterministic is None:
deterministic = os.environ.get("FLASH_ATTENTION_DETERMINISTIC", "0") == "1"
flash_kwargs["deterministic"] = deterministic
if position_ids is not None and query_length != 1 and not (torch.diff(position_ids[0], dim=-1) >= 0).all():
batch_size = query_states.size(0)
query_states, key_states, value_states, _, cu_seq_lens, max_seq_lens = prepare_fa2_from_position_ids(
query_states, key_states, value_states, position_ids[0]) # remove channel dimension
cu_seqlens_q, cu_seqlens_k = cu_seq_lens
max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
attn_output = flash_attn_varlen_func(
query_states,
key_states,
value_states,
cu_seqlens_q=cu_seqlens_q,
cu_seqlens_k=cu_seqlens_k,
max_seqlen_q=max_seqlen_in_batch_q,
max_seqlen_k=max_seqlen_in_batch_k,
dropout_p=kwargs.pop("dropout", 0.0),
softmax_scale=kwargs.pop("softmax_scale", None),
causal=causal,
**flash_kwargs,
)
attn_output = attn_output.view(batch_size, -1, attn_output.size(-2), attn_output.size(-1))
else:
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
query_length,
is_causal=is_causal,
sliding_window=sliding_window,
use_top_left_mask=use_top_left_mask,
deterministic=deterministic,
**kwargs,
) # do not pass position_ids to old flash_attention_forward
return attn_output
def ulysses_flash_attn_forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.46
**kwargs,
) -> Tuple[torch.Tensor, None, None]:
from transformers.models.qwen2_vl.modeling_qwen2_vl import repeat_kv, apply_multimodal_rotary_pos_emb
bsz, q_len, _ = hidden_states.size() # q_len = seq_length / sp_size
query_states = self.q_proj(hidden_states) # (batch_size, seq_length / sp_size, num_heads * head_size)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
ulysses_sp_size = get_ulysses_sequence_parallel_world_size()
if ulysses_sp_size > 1:
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
query_states = gather_seq_scatter_heads(query_states, seq_dim=2, head_dim=1)
key_states = gather_seq_scatter_heads(key_states, seq_dim=2, head_dim=1)
value_states = gather_seq_scatter_heads(value_states, seq_dim=2, head_dim=1)
# (batch_size, num_head / sp_size, seq_length, head_size)
full_q_len = query_states.size(2) # full_q_len = seq_length
else:
full_q_len = q_len
# Because the input can be padded, the absolute sequence length depends on the max position id.
if position_embeddings is None:
cos, sin = self.rotary_emb(value_states, position_ids)
else:
cos, sin = position_embeddings
query_states, key_states = apply_multimodal_rotary_pos_emb(query_states, key_states, cos, sin,
self.rope_scaling["mrope_section"])
dropout_rate = 0.0 if not self.training else self.attention_dropout
# Reshape to the expected shape for Flash Attention
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if (self.config.use_sliding_window and getattr(self.config, "sliding_window", None) is not None and
self.layer_idx >= self.config.max_window_layers):
sliding_window = self.config.sliding_window
else:
sliding_window = None
attn_output = flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
full_q_len,
dropout=dropout_rate,
sliding_window=sliding_window,
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
position_ids=position_ids, # important: pass position ids
) # (batch_size, seq_length, num_head / sp_size, head_size)
if ulysses_sp_size > 1:
attn_output = gather_heads_scatter_seq(attn_output, head_dim=2, seq_dim=1)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, None, None
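# --- Shape-flow sketch for the Ulysses path (illustrative comments only; values are hypothetical) ---
# With sp_size=2, bsz=1, full seq_length=8, num_heads=4:
#   after qkv projection + transpose: (1, 4, 4, head_dim)   # 8/2 = 4 local tokens per rank
#   after gather_seq_scatter_heads:   (1, 2, 8, head_dim)   # full seq, 4/2 = 2 local heads
#   flash attention output:           (1, 8, 2, head_dim)
#   after gather_heads_scatter_seq:   (1, 4, 4, head_dim) -> reshape to (1, 4, hidden) for o_proj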
================================================
FILE: verl/models/weight_loader_registry.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_weight_loader(arch: str):
from verl.models.llama.megatron.checkpoint_utils.llama_loader import load_state_dict_to_megatron_llama
from verl.models.qwen2.megatron.checkpoint_utils.qwen2_loader import load_state_dict_to_megatron_qwen2
_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY = {
'LlamaForCausalLM': load_state_dict_to_megatron_llama,
'Qwen2ForCausalLM': load_state_dict_to_megatron_qwen2,
}
if arch in _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY:
return _MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {_MODEL_WEIGHT_MEGATRON_LOADER_REGISTRY.keys()}")
================================================
FILE: verl/protocol.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement the base data transfer protocol between any two functions or modules.
We can subclass Protocol to define more detailed batch info with specific keys
"""
import pickle
import numpy as np
import pandas as pd
import copy
from dataclasses import dataclass, field
from typing import Callable, Dict, List, Union
import torch
import tensordict
from tensordict import TensorDict
from torch.utils.data import DataLoader, Dataset
from verl.utils.py_functional import union_two_dict
__all__ = ['DataProto', 'union_tensor_dict']
try:
tensordict.set_lazy_legacy(False).set()
except Exception:
pass
def pad_dataproto_to_divisor(data: 'DataProto', size_divisor: int):
"""Pad a DataProto to size divisible by size_divisor
Args:
size_divisor (int): size divisor
Returns:
data: (DataProto): the padded DataProto
pad_size (int)
"""
assert isinstance(data, DataProto), 'data must be a DataProto'
if len(data) % size_divisor != 0:
pad_size = size_divisor - len(data) % size_divisor
padding_protos = []
remaining_pad = pad_size
while remaining_pad > 0:
take_size = min(remaining_pad, len(data))
padding_protos.append(data[:take_size])
remaining_pad -= take_size
data_padded = DataProto.concat([data] + padding_protos)
else:
pad_size = 0
data_padded = data
return data_padded, pad_size
def unpad_dataproto(data: 'DataProto', pad_size):
if pad_size != 0:
data = data[:-pad_size]
return data
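# --- Illustrative sketch (added for exposition; not part of the original file) ---
# A minimal, hypothetical round trip: pad 10 rows up to a multiple of 8, then drop the padding.
def _example_pad_unpad():
    proto = DataProto.from_dict(tensors={'x': torch.zeros(10, 4)})
    padded, pad_size = pad_dataproto_to_divisor(proto, size_divisor=8)
    assert len(padded) == 16 and pad_size == 6  # 6 rows re-used from the front of the batch
    restored = unpad_dataproto(padded, pad_size)
    assert restored.batch.batch_size[0] == 10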
def union_tensor_dict(tensor_dict1: TensorDict, tensor_dict2: TensorDict) -> TensorDict:
"""Union two tensordicts."""
assert tensor_dict1.batch_size == tensor_dict2.batch_size, \
f'Two tensor dict must have identical batch size. Got {tensor_dict1.batch_size} and {tensor_dict2.batch_size}'
for key in tensor_dict2.keys():
if key not in tensor_dict1.keys():
tensor_dict1[key] = tensor_dict2[key]
else:
assert tensor_dict1[key].equal(tensor_dict2[key]), \
f'{key} in tensor_dict1 and tensor_dict2 are not equal'
return tensor_dict1
def union_numpy_dict(tensor_dict1: dict[str, np.ndarray], tensor_dict2: dict[str, np.ndarray]) -> dict[str, np.ndarray]:
for key, val in tensor_dict2.items():
if key in tensor_dict1:
assert isinstance(tensor_dict2[key], np.ndarray)
assert isinstance(tensor_dict1[key], np.ndarray)
# to properly deal with nan and object type
assert pd.DataFrame(tensor_dict2[key]).equals(pd.DataFrame(tensor_dict1[key])), \
f'{key} in tensor_dict1 and tensor_dict2 are not equal'
tensor_dict1[key] = val
return tensor_dict1
def list_of_dict_to_dict_of_list(list_of_dict: list[dict]):
if len(list_of_dict) == 0:
return {}
keys = list_of_dict[0].keys()
output = {key: [] for key in keys}
for data in list_of_dict:
for key, item in data.items():
assert key in output
output[key].append(item)
return output
def fold_batch_dim(data: 'DataProto', new_batch_size):
"""
Fold a batch dim from [bsz, xxx] into [new_bsz, bsz // new_bsz, xxx]
"""
batch_size = data.batch.batch_size[0]
assert batch_size % new_batch_size == 0
tensor: TensorDict = data.batch
non_tensor = data.non_tensor_batch
tensor = tensor.view(new_batch_size, -1)
tensor.auto_batch_size_(batch_dims=1)
for key, val in non_tensor.items():
non_tensor[key] = np.reshape(val, newshape=(new_batch_size, -1, *val.shape[1:]))
return DataProto(batch=tensor, non_tensor_batch=non_tensor, meta_info=data.meta_info)
def unfold_batch_dim(data: 'DataProto', batch_dims=2):
"""
Unfold the first n dims as new batch dim
"""
tensor: TensorDict = data.batch
non_tensor = data.non_tensor_batch
tensor.auto_batch_size_(batch_dims=batch_dims)
tensor = tensor.view(-1)
batch_size = tensor.batch_size[0]
non_tensor_new = {}
for key, val in non_tensor.items():
non_tensor_new[key] = np.reshape(val, newshape=(batch_size, *val.shape[batch_dims:]))
return DataProto(batch=tensor, non_tensor_batch=non_tensor_new, meta_info=data.meta_info)
def collate_fn(x: list['DataProtoItem']):
batch = []
non_tensor_batch = []
for data in x:
batch.append(data.batch)
non_tensor_batch.append(data.non_tensor_batch)
batch = torch.stack(batch).contiguous()
non_tensor_batch = list_of_dict_to_dict_of_list(non_tensor_batch)
for key, val in non_tensor_batch.items():
non_tensor_batch[key] = np.array(val, dtype=object)
return DataProto(batch=batch, non_tensor_batch=non_tensor_batch)
@dataclass
class DataProtoItem:
# TODO(zhangchi.usc1992) add consistency check
batch: TensorDict = None
non_tensor_batch: Dict = field(default_factory=dict)
meta_info: Dict = field(default_factory=dict)
@dataclass
class DataProto:
"""
A DataProto is a data structure that aims to provide a standard protocol for data exchange between functions.
It contains a batch (TensorDict) and a meta_info (Dict). The batch is a TensorDict https://pytorch.org/tensordict/.
TensorDict allows you to manipulate a dictionary of Tensors like a single Tensor. Ideally, the tensors with the
same batch size should be put inside batch.
"""
batch: TensorDict = None
non_tensor_batch: Dict = field(default_factory=dict)
meta_info: Dict = field(default_factory=dict)
def __post_init__(self):
# perform necessary checking
self.check_consistency()
def __len__(self):
if self.batch is not None:
return self.batch.batch_size[0]
elif self.non_tensor_batch is not None and len(self.non_tensor_batch) > 0:
random_key = list(self.non_tensor_batch.keys())[0]
return self.non_tensor_batch[random_key].shape[0]
else:
return 0
def __getitem__(self, item):
tensor_data = self.batch[item]
non_tensor_data = {key: val[item] for key, val in self.non_tensor_batch.items()}
return DataProtoItem(batch=tensor_data, non_tensor_batch=non_tensor_data, meta_info=self.meta_info)
def __getstate__(self):
import io
buffer = io.BytesIO()
if tensordict.__version__ >= '0.5.0' and self.batch is not None:
self.batch = self.batch.contiguous()
self.batch = self.batch.consolidate()
torch.save(self.batch, buffer)
buffer_bytes = buffer.getvalue()
return buffer_bytes, self.non_tensor_batch, self.meta_info
def __setstate__(self, data):
import io
batch_deserialized_bytes, non_tensor_batch, meta_info = data
batch_deserialized = io.BytesIO(initial_bytes=batch_deserialized_bytes)
batch = torch.load(batch_deserialized,
weights_only=False,
map_location='cpu' if not torch.cuda.is_available() else None)
self.batch = batch
self.non_tensor_batch = non_tensor_batch
self.meta_info = meta_info
def save_to_disk(self, filepath):
with open(filepath, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load_from_disk(filepath) -> 'DataProto':
with open(filepath, 'rb') as f:
data = pickle.load(f)
return data
def print_size(self, prefix=""):
size_of_tensordict = 0
for key, tensor in self.batch.items():
size_of_tensordict += tensor.element_size() * tensor.numel()
size_of_numpy_array = 0
for key, numpy_array in self.non_tensor_batch.items():
size_of_numpy_array += numpy_array.nbytes
size_of_numpy_array /= 1024**3
size_of_tensordict /= 1024**3
message = f'Size of tensordict: {size_of_tensordict} GB, size of non_tensor_batch: {size_of_numpy_array} GB'
if prefix:
message = f'{prefix}, ' + message
print(message)
def check_consistency(self):
"""Check the consistency of the DataProto. Mainly for batch and non_tensor_batch
We expose this function as a public one so that users can call it directly
"""
if self.batch is not None:
assert len(self.batch.batch_size) == 1, 'only support num_batch_dims=1'
if self.non_tensor_batch is not None:
for key, val in self.non_tensor_batch.items():
assert isinstance(val, np.ndarray)
if self.batch is not None and len(self.non_tensor_batch) != 0:
# TODO: we can actually lift this restriction if needed
assert len(self.batch.batch_size) == 1, 'only support num_batch_dims=1 when non_tensor_batch is not empty.'
batch_size = self.batch.batch_size[0]
for key, val in self.non_tensor_batch.items():
assert isinstance(
val, np.ndarray
) and val.dtype == object, 'data in the non_tensor_batch must be a numpy.array with dtype=object'
assert val.shape[
0] == batch_size, f'key {key} length {len(val)} is not equal to batch size {batch_size}'
@classmethod
def from_single_dict(cls, data: Dict[str, Union[torch.Tensor, np.ndarray]], meta_info=None):
tensors = {}
non_tensors = {}
for key, val in data.items():
if isinstance(val, torch.Tensor):
tensors[key] = val
elif isinstance(val, np.ndarray):
non_tensors[key] = val
else:
raise ValueError(f'Unsupported type in data {type(val)}')
return DataProto.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info)
@classmethod
def from_dict(cls, tensors: Dict[str, torch.Tensor], non_tensors=None, meta_info=None, num_batch_dims=1):
"""Create a DataProto from a dict of tensors. This assumes that
1. All the tensors in tensors share the same dim0
2. Only dim0 is the batch dim
"""
assert len(tensors) > 0, 'tensors must not be empty'
assert num_batch_dims > 0, 'num_batch_dims must be greater than zero'
if non_tensors is not None:
assert num_batch_dims == 1, 'only support num_batch_dims=1 when non_tensors is not None.'
if meta_info is None:
meta_info = {}
if non_tensors is None:
non_tensors = {}
assert isinstance(non_tensors, dict)
# get and check batch size
batch_size = None
pivot_key = None
for key, tensor in tensors.items():
if batch_size is None:
batch_size = tensor.shape[:num_batch_dims]
pivot_key = key
else:
current_batch = tensor.shape[:num_batch_dims]
assert batch_size == current_batch, \
f'Not all the tensor in tensors have the same batch size with batch_dims={num_batch_dims}. Got {pivot_key} has {batch_size}, {key} has {current_batch}'
for key, val in non_tensors.items():
non_tensors[key] = np.array(val, dtype=object)
tensor_dict = TensorDict(source=tensors, batch_size=batch_size)
return cls(batch=tensor_dict, non_tensor_batch=non_tensors, meta_info=meta_info)
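# --- Illustrative sketch (added for exposition; not part of the original file) ---
# Hypothetical construction: tensors share dim0 = 4; non-tensors become object arrays.
#
#   proto = DataProto.from_dict(
#       tensors={'obs': torch.zeros(4, 8), 'mask': torch.ones(4, 8)},
#       non_tensors={'tag': ['a', 'b', 'c', 'd']},
#       meta_info={'split': 'train'},
#   )
#   assert len(proto) == 4 and proto.non_tensor_batch['tag'].dtype == object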
def to(self, device) -> 'DataProto':
"""move the batch to device
Args:
device (torch.device, str): torch device
Returns:
DataProto: the current DataProto
"""
if self.batch is not None:
self.batch = self.batch.to(device)
return self
def select(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None, deepcopy=False) -> 'DataProto':
"""Select a subset of the DataProto via batch_keys and meta_info_keys
Args:
batch_keys (list, optional): a list of strings indicating the keys in batch to select
meta_info_keys (list, optional): a list of keys indicating the meta info to select
Returns:
DataProto: the DataProto with the selected batch_keys and meta_info_keys
"""
# TODO (zhangchi.usc1992) whether to copy
if batch_keys is not None:
batch_keys = tuple(batch_keys)
sub_batch = self.batch.select(*batch_keys)
else:
sub_batch = self.batch
if non_tensor_batch_keys is not None:
non_tensor_batch = {key: val for key, val in self.non_tensor_batch.items() if key in non_tensor_batch_keys}
else:
non_tensor_batch = self.non_tensor_batch
if deepcopy:
non_tensor_batch = copy.deepcopy(non_tensor_batch)
if meta_info_keys is not None:
sub_meta_info = {key: val for key, val in self.meta_info.items() if key in meta_info_keys}
else:
sub_meta_info = self.meta_info
if deepcopy:
sub_meta_info = copy.deepcopy(sub_meta_info)
return DataProto(batch=sub_batch, non_tensor_batch=non_tensor_batch, meta_info=sub_meta_info)
def pop(self, batch_keys=None, non_tensor_batch_keys=None, meta_info_keys=None) -> 'DataProto':
"""Pop a subset of the DataProto via `batch_keys` and `meta_info_keys`
Args:
batch_keys (list, optional): a list of strings indicating the keys in batch to pop
meta_info_keys (list, optional): a list of keys indicating the meta info to pop
Returns:
DataProto: the DataProto with the popped batch_keys and meta_info_keys
"""
assert batch_keys is not None
if meta_info_keys is None:
meta_info_keys = []
if non_tensor_batch_keys is None:
non_tensor_batch_keys = []
tensors = {}
# tensor batch
for key in batch_keys:
assert key in self.batch.keys()
tensors[key] = self.batch.pop(key)
non_tensors = {}
# non tensor batch
for key in non_tensor_batch_keys:
assert key in self.non_tensor_batch.keys()
non_tensors[key] = self.non_tensor_batch.pop(key)
meta_info = {}
for key in meta_info_keys:
assert key in self.meta_info.keys()
meta_info[key] = self.meta_info.pop(key)
return DataProto.from_dict(tensors=tensors, non_tensors=non_tensors, meta_info=meta_info)
def rename(self, old_keys=None, new_keys=None) -> 'DataProto':
"""
Note that this function only renames keys in the batch
"""
def validate_input(keys):
if keys is not None:
if isinstance(keys, str):
keys = [keys]
elif isinstance(keys, list):
pass
else:
raise TypeError(f'keys must be a list or a string, but got {type(keys)}')
return keys
old_keys = validate_input(old_keys)
new_keys = validate_input(new_keys)
if len(new_keys) != len(old_keys):
raise ValueError(
f'new_keys and old_keys must have the same length, but got {len(new_keys)} and {len(old_keys)}')
self.batch.rename_key_(tuple(old_keys), tuple(new_keys))
return self
def union(self, other: 'DataProto') -> 'DataProto':
"""Union with another DataProto. Union batch and meta_info separately.
Throw an error if
- there are conflict keys in batch and they are not equal
- the batch sizes of the two data batches are not the same
- there are conflict keys in meta_info and they are not the same.
Args:
other (DataProto): another DataProto to union
Returns:
DataProto: the DataProto after union
"""
self.batch = union_tensor_dict(self.batch, other.batch)
self.non_tensor_batch = union_numpy_dict(self.non_tensor_batch, other.non_tensor_batch)
self.meta_info = union_two_dict(self.meta_info, other.meta_info)
return self
def make_iterator(self, mini_batch_size, epochs, seed=None, dataloader_kwargs=None):
r"""Make an iterator from the DataProto. This is built upon that TensorDict can be used as a normal Pytorch
dataset. See https://pytorch.org/tensordict/tutorials/data_fashion for more details.
Args:
mini_batch_size (int): mini-batch size when iterating the dataset. We require that ``batch.batch_size[0] % mini_batch_size == 0``.
epochs (int): number of epochs when iterating the dataset.
dataloader_kwargs (Any): internally, it returns a DataLoader over the batch. The dataloader_kwargs is the kwargs passed to the DataLoader.
Returns:
Iterator: an iterator that yields one mini-batch of data at a time. The total number of iteration steps is ``self.batch.batch_size[0] * epochs // mini_batch_size``
"""
assert self.batch.batch_size[0] % mini_batch_size == 0, f"{self.batch.batch_size[0]} % {mini_batch_size} != 0"
# we can directly create a dataloader from TensorDict
if dataloader_kwargs is None:
dataloader_kwargs = {}
if seed is not None:
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = None
assert isinstance(dataloader_kwargs, Dict)
train_dataloader = DataLoader(dataset=self,
batch_size=mini_batch_size,
collate_fn=collate_fn,
generator=generator,
**dataloader_kwargs)
def get_data():
for _ in range(epochs):
for d in train_dataloader:
d.meta_info = self.meta_info
yield d
return iter(get_data())
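# --- Illustrative sketch (added for exposition; not part of the original file) ---
# Hypothetical iteration: 8 rows, mini-batches of 4, two epochs -> 4 mini-batches total.
#
#   proto = DataProto.from_dict(tensors={'x': torch.arange(8).view(8, 1)})
#   batches = list(proto.make_iterator(mini_batch_size=4, epochs=2, seed=0))
#   assert len(batches) == 4 and all(len(b) == 4 for b in batches)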
def chunk(self, chunks: int) -> List['DataProto']:
"""Split the batch among dim=0 into chunks. The meta_info is passed to each DataProto after split.
Args:
chunks (int): the number of chunks to split on dim=0
Returns:
List[DataProto]: a list of DataProto after splitting
"""
assert len(
self) % chunks == 0, f'only support equal chunk. Got size of DataProto {len(self)} and chunk {chunks}.'
if self.batch is not None:
batch_lst = self.batch.chunk(chunks=chunks, dim=0)
else:
batch_lst = [None for _ in range(chunks)]
non_tensor_batch_lst = [{} for _ in range(chunks)]
for key, val in self.non_tensor_batch.items():
assert isinstance(val, np.ndarray)
non_tensor_lst = np.array_split(val, chunks)
assert len(non_tensor_lst) == chunks
for i in range(chunks):
non_tensor_batch_lst[i][key] = non_tensor_lst[i]
output = []
for i in range(chunks):
output.append(
DataProto(batch=batch_lst[i], non_tensor_batch=non_tensor_batch_lst[i], meta_info=self.meta_info))
return output
@staticmethod
def concat(data: List['DataProto']) -> 'DataProto':
"""Concat a list of DataProto. The batch is concatenated among dim=0.
The meta_info is assumed to be identical across elements; the first one is used.
Args:
data (List[DataProto]): list of DataProto
Returns:
DataProto: concatenated DataProto
"""
batch_lst = []
for batch in data:
batch_lst.append(batch.batch)
if batch_lst[0] is not None:
new_batch = torch.cat(batch_lst, dim=0)
else:
new_batch = None
non_tensor_batch = list_of_dict_to_dict_of_list(list_of_dict=[d.non_tensor_batch for d in data])
for key, val in non_tensor_batch.items():
non_tensor_batch[key] = np.concatenate(val, axis=0)
return DataProto(batch=new_batch, non_tensor_batch=non_tensor_batch, meta_info=data[0].meta_info)
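# --- Illustrative sketch (added for exposition; not part of the original file) ---
# chunk and concat are inverses: split 6 rows into 3 equal parts, then merge them back.
#
#   proto = DataProto.from_dict(tensors={'x': torch.arange(6).view(6, 1)})
#   parts = proto.chunk(chunks=3)     # three DataProto of length 2
#   merged = DataProto.concat(parts)  # length 6 again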
def reorder(self, indices):
"""
Note that this operation is in-place
"""
indices_np = indices.detach().numpy()
self.batch = self.batch[indices]
self.non_tensor_batch = {key: val[indices_np] for key, val in self.non_tensor_batch.items()}
def repeat(self, repeat_times=2, interleave=True):
"""
Repeat the batch data a specified number of times.
Args:
repeat_times (int): Number of times to repeat the data.
interleave (bool): Whether to interleave the repeated data.
Returns:
DataProto: A new DataProto with repeated data.
"""
if self.batch is not None:
if interleave:
# Interleave the data
repeated_tensors = {
key: tensor.repeat_interleave(repeat_times, dim=0) for key, tensor in self.batch.items()
}
else:
# Stack the data
repeated_tensors = {
key: tensor.unsqueeze(0).expand(repeat_times, *tensor.shape).reshape(-1, *tensor.shape[1:])
for key, tensor in self.batch.items()
}
repeated_batch = TensorDict(
source=repeated_tensors,
batch_size=(self.batch.batch_size[0] * repeat_times,),
)
else:
repeated_batch = None
repeated_non_tensor_batch = {}
for key, val in self.non_tensor_batch.items():
if interleave:
repeated_non_tensor_batch[key] = np.repeat(val, repeat_times, axis=0)
else:
repeated_non_tensor_batch[key] = np.tile(val, (repeat_times,) + (1,) * (val.ndim - 1))
return DataProto(
batch=repeated_batch,
non_tensor_batch=repeated_non_tensor_batch,
meta_info=self.meta_info,
)
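# --- Illustrative sketch (added for exposition; not part of the original file) ---
# The two repeat layouts for a hypothetical batch [1, 2] with repeat_times=2:
def _example_repeat():
    proto = DataProto.from_dict(tensors={'x': torch.tensor([[1], [2]])})
    assert proto.repeat(2, interleave=True).batch['x'].flatten().tolist() == [1, 1, 2, 2]
    assert proto.repeat(2, interleave=False).batch['x'].flatten().tolist() == [1, 2, 1, 2]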
import ray
@dataclass
class DataProtoFuture:
"""
DataProtoFuture aims to eliminate actual data fetching on the driver. By doing so, the driver doesn't have to wait
for data, so asynchronous execution becomes possible.
DataProtoFuture contains a list of futures from another WorkerGroup of size world_size.
- collect_fn is a Callable that reduces the list of futures to a DataProto
- dispatch_fn is a Callable that partitions the DataProto into a list of DataProto of size world_size and then selects the chunk for the current rank
Potential issue: we can optimize dispatch_fn(collect_fn) such that only the needed data is fetched on the destination
- DataProtoFuture only supports directly passing the output of one method to the input of another. You can't perform any
operation on a DataProtoFuture in the driver.
"""
collect_fn: Callable
futures: List[ray.ObjectRef]
dispatch_fn: Callable = None
@staticmethod
def concat(data: List[ray.ObjectRef]) -> 'DataProtoFuture':
output = DataProtoFuture(collect_fn=DataProto.concat, futures=data)
return output
def chunk(self, chunks: int) -> List['DataProtoFuture']:
from functools import partial
arg_future_lst = []
for i in range(chunks):
# note that we can't directly pass i and chunks
def dispatch_fn(x, i, chunks):
return x.chunk(chunks=chunks)[i]
arg_future = DataProtoFuture(collect_fn=self.collect_fn,
dispatch_fn=partial(dispatch_fn, i=i, chunks=chunks),
futures=self.futures)
arg_future_lst.append(arg_future)
return arg_future_lst
def get(self):
output = ray.get(self.futures) # dp_size.
for o in output:
assert isinstance(o, DataProto)
output = self.collect_fn(output) # select dp, concat
if self.dispatch_fn is not None:
output = self.dispatch_fn(output) # split in batch dim, select using dp
return output
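# --- Usage sketch (illustrative comments only; requires a live ray cluster) ---
# A DataProtoFuture typically wraps the ObjectRefs returned by a non-blocking
# WorkerGroup call; the names below are hypothetical:
#
#   future = DataProtoFuture.concat(object_refs)  # one ObjectRef per dp rank, no fetch yet
#   shards = future.chunk(chunks=dp_size)         # still no data movement on the driver
#   data = shards[0].get()                        # ray.get + collect_fn + dispatch_fn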
================================================
FILE: verl/single_controller/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
version_folder = os.path.dirname(os.path.abspath(__file__))
# Note(haibin.lin): single_controller.__version__ is deprecated
with open(os.path.join(os.path.join(version_folder, os.pardir), 'version/version')) as f:
__version__ = f.read().strip()
from . import base
from .base import *
__all__ = base.__all__
================================================
FILE: verl/single_controller/base/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .worker import Worker
from .worker_group import WorkerGroup, ClassWithInitArgs, ResourcePool
__all__ = ['Worker', 'WorkerGroup', 'ClassWithInitArgs', 'ResourcePool']
================================================
FILE: verl/single_controller/base/decorator.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from functools import wraps
from typing import Dict, List, Tuple
from types import FunctionType
from verl.protocol import DataProtoFuture
# here we add a magic number to avoid clashing with an attribute a user-defined function may already have
MAGIC_ATTR = 'attrs_3141562937'
class Dispatch(Enum):
RANK_ZERO = 0
ONE_TO_ALL = 1
ALL_TO_ALL = 2
MEGATRON_COMPUTE = 3
MEGATRON_PP_AS_DP = 4
MEGATRON_PP_ONLY = 5
MEGATRON_COMPUTE_PROTO = 6
MEGATRON_PP_AS_DP_PROTO = 7
DP_COMPUTE = 8
DP_COMPUTE_PROTO = 9
DP_COMPUTE_PROTO_WITH_FUNC = 10
DP_COMPUTE_METRIC = 11
class Execute(Enum):
ALL = 0
RANK_ZERO = 1
def _split_args_kwargs_data_proto(chunks, *args, **kwargs):
from verl.protocol import DataProto, DataProtoFuture
splitted_args = []
for arg in args:
assert isinstance(arg, (DataProto, DataProtoFuture))
splitted_args.append(arg.chunk(chunks=chunks))
splitted_kwargs = {}
for key, val in kwargs.items():
assert isinstance(val, (DataProto, DataProtoFuture))
splitted_kwargs[key] = val.chunk(chunks=chunks)
return splitted_args, splitted_kwargs
def dispatch_one_to_all(worker_group, *args, **kwargs):
args = tuple([arg] * worker_group.world_size for arg in args)
kwargs = {k: [v] * worker_group.world_size for k, v in kwargs.items()}
return args, kwargs
def dispatch_all_to_all(worker_group, *args, **kwargs):
return args, kwargs
def collect_all_to_all(worker_group, output):
return output
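# --- Illustrative sketch (added for exposition; not part of the original file) ---
# ONE_TO_ALL replicates every argument once per worker; _FakeGroup is a hypothetical stand-in.
def _example_dispatch_one_to_all():
    class _FakeGroup:
        world_size = 4
    args, kwargs = dispatch_one_to_all(_FakeGroup(), 'config', seed=0)
    assert args == (['config'] * 4,)
    assert kwargs == {'seed': [0] * 4}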
def dispatch_megatron_compute(worker_group, *args, **kwargs):
"""
The user passes in dp data. The data is dispatched to all tp/pp ranks with the same dp rank
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group,
MegatronWorkerGroup), f'worker_group must be MegatronWorkerGroup, Got {type(worker_group)}'
all_args = []
for arg in args:
assert isinstance(arg, (Tuple, List)) and len(arg) == worker_group.dp_size
transformed_args = []
for i in range(worker_group.world_size):
local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
transformed_args.append(arg[local_dp_rank])
all_args.append(transformed_args)
all_args = tuple(all_args)
all_kwargs = {}
for k, v in kwargs.items():
assert isinstance(v, (Tuple, List)) and len(v) == worker_group.dp_size
transformed_v = []
for i in range(worker_group.world_size):
local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
transformed_v.append(v[local_dp_rank])
all_kwargs[k] = transformed_v
return all_args, all_kwargs
def collect_megatron_compute(worker_group, output):
"""
Only collect the data from ranks with tp=0 and pp=last, across every dp rank
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
output_in_dp = []
pp_size = worker_group.get_megatron_global_info().pp_size
for global_rank in range(worker_group.world_size):
local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank)
if local_rank_info.tp_rank == 0 and local_rank_info.pp_rank == pp_size - 1:
output_in_dp.append(output[global_rank])
return output_in_dp
def dispatch_megatron_compute_data_proto(worker_group, *args, **kwargs):
"""
All the args and kwargs must be DataProto. The batch will be chunked by dp_size and passed to each rank
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.dp_size, *args, **kwargs)
return dispatch_megatron_compute(worker_group, *splitted_args, **splitted_kwargs)
def _concat_data_proto_or_future(output: List):
from verl.protocol import DataProto, DataProtoFuture
import ray
# make sure all the elements in output have the same type
for o in output:
assert type(o) == type(output[0])
o = output[0]
if isinstance(o, DataProto):
return DataProto.concat(output)
elif isinstance(o, ray.ObjectRef):
return DataProtoFuture.concat(output)
else:
raise NotImplementedError
def collect_megatron_compute_data_proto(worker_group, output):
"""
Each output must be a DataProto. We concat the dim=0 of output
"""
from verl.protocol import DataProto
import ray
output = collect_megatron_compute(worker_group, output)
for o in output:
assert isinstance(o, (DataProto, ray.ObjectRef)), f"expecting {o} to be DataProto, but got {type(o)}"
return _concat_data_proto_or_future(output)
def dispatch_megatron_pp_as_dp(worker_group, *args, **kwargs):
"""
treat pp as dp.
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
pp_size = worker_group.pp_size
dp_size = worker_group.dp_size
pp_dp_size = pp_size * dp_size
all_args = []
for arg in args:
assert isinstance(arg, (List, Tuple)) and len(arg) == pp_dp_size
transformed_args = []
for i in range(worker_group.world_size):
local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
local_pp_rank = worker_group.get_megatron_rank_info(rank=i).pp_rank
# compute the rank in arg. Note that the order is dp then pp
# Also note that the outputs within a pp group will first be all-gathered, then only the output of pp0 will be collected.
# For pp=2 dp=4, a batch of data "ABCDEFGH" should be dispatched and collected in the following order:
# dispatch: pp_allgather: collect:
# dp 0 1 2 3 dp 0 1 2 3
# pp +---------+ pp +-------------+
# 0 | A C E G | 0 | AB CD EF GH | ABCDEFGH
# 1 | B D F H | 1 | AB CD EF GH |
# +---------+ +-------------+
arg_rank = local_dp_rank * worker_group.pp_size + local_pp_rank
transformed_args.append(arg[arg_rank])
all_args.append(transformed_args)
all_args = tuple(all_args)
all_kwargs = {}
for k, v in kwargs.items():
assert isinstance(v, (List, Tuple)) and len(v) == pp_dp_size, f'expect len(v)=={pp_dp_size}, got {len(v)}'
transformed_v = []
for i in range(worker_group.world_size):
local_dp_rank = worker_group.get_megatron_rank_info(rank=i).dp_rank
local_pp_rank = worker_group.get_megatron_rank_info(rank=i).pp_rank
# compute the rank in arg. Note that the order is dp then pp
arg_rank = local_dp_rank * worker_group.pp_size + local_pp_rank
transformed_v.append(v[arg_rank])
all_kwargs[k] = transformed_v
return all_args, all_kwargs
def collect_megatron_pp_as_dp(worker_group, output):
"""
Treat pp as dp. Only collect data from ranks with tp=0 and pp=0
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
output_in_dp = []
for global_rank in range(worker_group.world_size):
local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank)
if local_rank_info.tp_rank == 0 and local_rank_info.pp_rank == 0:
output_in_dp.append(output[global_rank])
return output_in_dp
def collect_megatron_pp_only(worker_group, output):
"""
Only collect output of megatron pp. This is useful when examining weight names, as they are identical across tp/dp ranks
"""
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
output_in_pp = []
for global_rank in range(worker_group.world_size):
local_rank_info = worker_group.get_megatron_rank_info(rank=global_rank)
if local_rank_info.tp_rank == 0 and local_rank_info.dp_rank == 0:
output_in_pp.append(output[global_rank])
return output_in_pp
def dispatch_megatron_pp_as_dp_data_proto(worker_group, *args, **kwargs):
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
pp_dp_size = worker_group.dp_size * worker_group.pp_size
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(pp_dp_size, *args, **kwargs)
return dispatch_megatron_pp_as_dp(worker_group, *splitted_args, **splitted_kwargs)
def collect_megatron_pp_as_dp_data_proto(worker_group, output):
from verl.protocol import DataProto
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
assert isinstance(worker_group, MegatronWorkerGroup)
output = collect_megatron_pp_as_dp(worker_group, output)
return _concat_data_proto_or_future(output)
def dispatch_dp_compute(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
for arg in args:
assert isinstance(arg, (Tuple, List)) and len(arg) == worker_group.world_size
for k, v in kwargs.items():
assert isinstance(v, (Tuple, List)) and len(v) == worker_group.world_size
return args, kwargs
def collect_dp_compute(worker_group, output):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
assert len(output) == worker_group.world_size
return output
def dispatch_dp_compute_data_proto(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args, **kwargs)
return splitted_args, splitted_kwargs
def dispatch_dp_compute_data_proto_with_func(worker_group, *args, **kwargs):
from verl.single_controller.base.worker_group import WorkerGroup
assert isinstance(worker_group, WorkerGroup)
assert type(args[0]) == FunctionType # NOTE: the first positional arg is a function!
splitted_args, splitted_kwargs = _split_args_kwargs_data_proto(worker_group.world_size, *args[1:], **kwargs)
splitted_args_with_func = [[args[0]] * worker_group.world_size] + splitted_args
return splitted_args_with_func, splitted_kwargs
def collect_dp_compute_data_proto(worker_group, output):
from verl.protocol import DataProto
import ray
for o in output:
assert isinstance(o, (DataProto, ray.ObjectRef)), f"expecting {o} to be DataProto, but got {type(o)}"
output = collect_dp_compute(worker_group, output)
return _concat_data_proto_or_future(output)
def get_predefined_dispatch_fn(dispatch_mode):
predefined_dispatch_mode_fn = {
Dispatch.ONE_TO_ALL: {
'dispatch_fn': dispatch_one_to_all,
'collect_fn': collect_all_to_all,
},
Dispatch.ALL_TO_ALL: {
'dispatch_fn': dispatch_all_to_all,
'collect_fn': collect_all_to_all,
},
Dispatch.MEGATRON_COMPUTE: {
'dispatch_fn': dispatch_megatron_compute,
'collect_fn': collect_megatron_compute,
},
Dispatch.MEGATRON_PP_AS_DP: {
'dispatch_fn': dispatch_megatron_pp_as_dp,
'collect_fn': collect_megatron_pp_as_dp,
},
Dispatch.MEGATRON_PP_ONLY: {
'dispatch_fn': dispatch_one_to_all,
'collect_fn': collect_megatron_pp_only
},
Dispatch.MEGATRON_COMPUTE_PROTO: {
'dispatch_fn': dispatch_megatron_compute_data_proto,
'collect_fn': collect_megatron_compute_data_proto
},
Dispatch.MEGATRON_PP_AS_DP_PROTO: {
'dispatch_fn': dispatch_megatron_pp_as_dp_data_proto,
'collect_fn': collect_megatron_pp_as_dp_data_proto
},
Dispatch.DP_COMPUTE: {
'dispatch_fn': dispatch_dp_compute,
'collect_fn': collect_dp_compute
},
Dispatch.DP_COMPUTE_PROTO: {
'dispatch_fn': dispatch_dp_compute_data_proto,
'collect_fn': collect_dp_compute_data_proto
},
Dispatch.DP_COMPUTE_PROTO_WITH_FUNC: {
'dispatch_fn': dispatch_dp_compute_data_proto_with_func,
'collect_fn': collect_dp_compute_data_proto
},
Dispatch.DP_COMPUTE_METRIC: {
'dispatch_fn': dispatch_dp_compute_data_proto,
'collect_fn': collect_dp_compute
}
}
return predefined_dispatch_mode_fn[dispatch_mode]
def get_predefined_execute_fn(execute_mode):
"""
Note that here we only ask execute_all and execute_rank_zero to be implemented.
The choice of how these two functions handle the 'blocking' argument is left to users.
"""
predefined_execute_mode_fn = {
Execute.ALL: {
'execute_fn_name': 'execute_all'
},
Execute.RANK_ZERO: {
'execute_fn_name': 'execute_rank_zero'
}
}
return predefined_execute_mode_fn[execute_mode]
def _check_dispatch_mode(dispatch_mode):
assert isinstance(dispatch_mode,
(Dispatch, Dict)), f'dispatch_mode must be a Dispatch or a Dict. Got {dispatch_mode}'
if isinstance(dispatch_mode, Dict):
necessary_keys = ['dispatch_fn', 'collect_fn']
for key in necessary_keys:
assert key in dispatch_mode, f'key {key} should be in dispatch_mode if it is a dictionary'
def _check_execute_mode(execute_mode):
assert isinstance(execute_mode, Execute), f'execute_mode must be a Execute. Got {execute_mode}'
def _materialize_futures(*args, **kwargs):
new_args = []
for arg in args:
if isinstance(arg, DataProtoFuture):
arg = arg.get()
# add more type to materialize
new_args.append(arg)
for k, v in kwargs.items():
if isinstance(v, DataProtoFuture):
kwargs[k] = v.get()
new_args = tuple(new_args)
return new_args, kwargs
def register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.ALL, blocking=True, materialize_futures=True):
_check_dispatch_mode(dispatch_mode=dispatch_mode)
_check_execute_mode(execute_mode=execute_mode)
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
if materialize_futures:
args, kwargs = _materialize_futures(*args, **kwargs)
return func(*args, **kwargs)
attrs = {'dispatch_mode': dispatch_mode, 'execute_mode': execute_mode, 'blocking': blocking}
setattr(inner, MAGIC_ATTR, attrs)
return inner
return decorator
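# --- Usage sketch (illustrative comments only; not part of the original file) ---
# A hypothetical worker method decorated with `register`. Once bound to a
# WorkerGroup, a single driver-side call chunks the DataProto into dp_size
# shards, runs the method on every worker, and concatenates the outputs:
#
#   class MyWorker(Worker):
#       @register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
#       def compute_log_prob(self, data: DataProto) -> DataProto:
#           ...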
================================================
FILE: verl/single_controller/base/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/single_controller/base/megatron/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl.single_controller.base.worker import Worker, DistRankInfo, DistGlobalInfo
class MegatronWorker(Worker):
def __init__(self, cuda_visible_devices=None) -> None:
super().__init__(cuda_visible_devices)
def get_megatron_global_info(self):
from megatron.core import parallel_state as mpu
tp_size = mpu.get_tensor_model_parallel_world_size()
dp_size = mpu.get_data_parallel_world_size()
pp_size = mpu.get_pipeline_model_parallel_world_size()
info = DistGlobalInfo(tp_size=tp_size, dp_size=dp_size, pp_size=pp_size)
return info
def get_megatron_rank_info(self):
from megatron.core import parallel_state as mpu
tp_rank = mpu.get_tensor_model_parallel_rank()
dp_rank = mpu.get_data_parallel_rank()
pp_rank = mpu.get_pipeline_model_parallel_rank()
info = DistRankInfo(tp_rank=tp_rank, dp_rank=dp_rank, pp_rank=pp_rank)
return info
================================================
FILE: verl/single_controller/base/megatron/worker_group.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from .worker import DistRankInfo, DistGlobalInfo
from verl.single_controller.base import ResourcePool, WorkerGroup
class MegatronWorkerGroup(WorkerGroup):
def __init__(self, resource_pool: ResourcePool, **kwargs):
super().__init__(resource_pool=resource_pool, **kwargs)
self._megatron_rank_info = None
self._megatron_global_info: DistGlobalInfo = None
def init_megatron(self, default_megatron_kwargs: Dict = None):
raise NotImplementedError(f"MegatronWorkerGroup.init_megatron should be overwritten")
def get_megatron_rank_info(self, rank: int) -> DistRankInfo:
assert 0 <= rank < self.world_size, f'rank must be from [0, world_size), Got {rank}'
return self._megatron_rank_info[rank]
@property
def tp_size(self):
assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
return self._megatron_global_info.tp_size
@property
def dp_size(self):
assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
return self._megatron_global_info.dp_size
@property
def pp_size(self):
assert self._megatron_global_info is not None, "MegatronWorkerGroup._megatron_global_info must be initialized"
return self._megatron_global_info.pp_size
def get_megatron_global_info(self):
return self._megatron_global_info
================================================
FILE: verl/single_controller/base/register_center/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/single_controller/base/register_center/ray.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ray
@ray.remote
class WorkerGroupRegisterCenter:
def __init__(self, rank_zero_info):
self.rank_zero_info = rank_zero_info
def get_rank_zero_info(self):
return self.rank_zero_info
def create_worker_group_register_center(name, info):
return WorkerGroupRegisterCenter.options(name=name).remote(info)
================================================
FILE: verl/single_controller/base/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
the class for Worker
"""
import os
import socket
from dataclasses import dataclass
from .decorator import register, Dispatch, Execute
@dataclass
class DistRankInfo:
tp_rank: int
dp_rank: int
pp_rank: int
@dataclass
class DistGlobalInfo:
tp_size: int
dp_size: int
pp_size: int
class WorkerHelper:
def _get_node_ip(self):
def get_node_ip_by_sdk():
if os.getenv("WG_BACKEND", None) == "ray":
import ray
return ray._private.services.get_node_ip_address()
else:
raise NotImplementedError("WG_BACKEND now just support ray mode.")
host_ipv4 = os.getenv("MY_HOST_IP", None)
host_ipv6 = os.getenv("MY_HOST_IPV6", None)
host_ip_by_env = host_ipv4 or host_ipv6
host_ip_by_sdk = get_node_ip_by_sdk()
host_ip = host_ip_by_env or host_ip_by_sdk
return host_ip
def _get_free_port(self):
with socket.socket() as sock:
sock.bind(('', 0))
return sock.getsockname()[1]
def get_availale_master_addr_port(self):
return self._get_node_ip(), str(self._get_free_port())
def _get_pid(self):
return
class WorkerMeta:
keys = [
"WORLD_SIZE", "RANK", "LOCAL_WORLD_SIZE", "LOCAL_RANK", "MASTER_ADDR", "MASTER_PORT", "CUDA_VISIBLE_DEVICES"
]
def __init__(self, store) -> None:
self._store = store
def to_dict(self):
return {f"_{key.lower()}": self._store.get(f"_{key.lower()}", None) for key in WorkerMeta.keys}
# we assume that in each WorkerGroup, there is a Master Worker
class Worker(WorkerHelper):
"""A (distributed) worker."""
def __new__(cls, *args, **kwargs):
instance = super().__new__(cls)
# note that the env var is parsed as int, so '0' and unset both disable worker init
disable_worker_init = int(os.environ.get('DISABLE_WORKER_INIT', 0))
if disable_worker_init:
return instance
rank = os.environ.get("RANK", None)
worker_group_prefix = os.environ.get("WG_PREFIX", None)
# when the @ray.remote decorator is applied, __new__ is still called, but we don't want to run _configure_before_init in that case
if None not in [rank, worker_group_prefix] and 'ActorClass(' not in cls.__name__:
instance._configure_before_init(f"{worker_group_prefix}_register_center", int(rank))
return instance
def _configure_before_init(self, register_center_name: str, rank: int):
assert isinstance(rank, int), f"rank must be int, instead of {type(rank)}"
if rank == 0:
master_addr, master_port = self.get_availale_master_addr_port()
rank_zero_info = {
"MASTER_ADDR": master_addr,
"MASTER_PORT": master_port,
}
if os.getenv("WG_BACKEND", None) == "ray":
from verl.single_controller.base.register_center.ray import create_worker_group_register_center
self.register_center = create_worker_group_register_center(name=register_center_name,
info=rank_zero_info)
os.environ.update(rank_zero_info)
def __init__(self, cuda_visible_devices=None) -> None:
# construct a meta from environment variables. Note that the import must be inside the class because it is executed remotely
import os
world_size = int(os.environ['WORLD_SIZE'])
rank = int(os.environ['RANK'])
self._rank = rank
self._world_size = world_size
master_addr = os.environ["MASTER_ADDR"]
master_port = os.environ["MASTER_PORT"]
local_world_size = int(os.getenv("LOCAL_WORLD_SIZE", "1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
store = {
'_world_size': world_size,
'_rank': rank,
'_local_world_size': local_world_size,
'_local_rank': local_rank,
'_master_addr': master_addr,
'_master_port': master_port
}
if cuda_visible_devices is not None:
store['_cuda_visible_devices'] = cuda_visible_devices
meta = WorkerMeta(store=store)
self._configure_with_meta(meta=meta)
def _configure_with_meta(self, meta: WorkerMeta):
"""
This function should only be called inside by WorkerGroup
"""
assert isinstance(meta, WorkerMeta)
self.__dict__.update(meta.to_dict()) # this is hacky
# print(f"__dict__: {self.__dict__}")
for key in WorkerMeta.keys:
val = self.__dict__.get(f"_{key.lower()}", None)
if val is not None:
# print(f"set {key} to {val}")
os.environ[key] = str(val)
os.environ["REDIS_STORE_SERVER_HOST"] = str(self._master_addr).replace("[", "").replace(
"]", "") if self._master_addr else ""
def get_master_addr_port(self):
return self._master_addr, self._master_port
def get_cuda_visible_devices(self):
import os
cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "not set")
return cuda_visible_devices
@property
def world_size(self):
return self._world_size
@property
def rank(self):
return self._rank
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO_WITH_FUNC)
def execute_with_func_generator(self, func, *args, **kwargs):
ret_proto = func(self, *args, **kwargs)
return ret_proto
@register(dispatch_mode=Dispatch.ALL_TO_ALL, execute_mode=Execute.RANK_ZERO)
def execute_func_rank_zero(self, func, *args, **kwargs):
result = func(*args, **kwargs)
return result
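# --- Usage sketch (illustrative comments only; not part of the original file) ---
# Worker reads its distributed context from environment variables that the
# WorkerGroup normally sets up. A hypothetical single-process construction:
#
#   os.environ.update({'WORLD_SIZE': '1', 'RANK': '0',
#                      'MASTER_ADDR': '127.0.0.1', 'MASTER_PORT': '29500',
#                      'DISABLE_WORKER_INIT': '1'})
#   w = Worker()
#   assert w.world_size == 1 and w.rank == 0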
================================================
FILE: verl/single_controller/base/worker_group.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
the class of WorkerGroup
"""
import logging
import threading
import signal
import time
from typing import List, Any, Callable, Dict
from .decorator import MAGIC_ATTR, Dispatch, get_predefined_dispatch_fn, get_predefined_execute_fn
class ResourcePool:
"""The resource pool with meta info such as world_size."""
def __init__(self, process_on_nodes=None, max_collocate_count: int = 10, n_gpus_per_node=8) -> None:
if process_on_nodes is None:
process_on_nodes = []
self._store = process_on_nodes
self.max_collocate_count = max_collocate_count
self.n_gpus_per_node = n_gpus_per_node # this is left for future Huawei hardware that contains 16 GPUs per node
def add_node(self, process_count):
self._store.append(process_count)
@property
def world_size(self):
return sum(self._store)
def __call__(self) -> Any:
return self._store
@property
def store(self):
return self._store
def local_world_size_list(self) -> List[int]:
nested_local_world_size_list = [
[local_world_size for _ in range(local_world_size)] for local_world_size in self._store
]
return [item for row in nested_local_world_size_list for item in row]
def local_rank_list(self) -> List[int]:
nested_local_rank_list = [[i for i in range(local_world_size)] for local_world_size in self._store]
return [item for row in nested_local_rank_list for item in row]
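# --- Illustrative sketch (added for exposition; not part of the original file) ---
# A hypothetical pool with 2 processes on one node and 1 on another:
def _example_resource_pool():
    pool = ResourcePool(process_on_nodes=[2, 1])
    assert pool.world_size == 3
    assert pool.local_world_size_list() == [2, 2, 1]
    assert pool.local_rank_list() == [0, 1, 0]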
class ClassWithInitArgs:
"""
This class stores a class constructor and the args/kwargs to construct the class.
It is used to instantiate the remote class.
"""
def __init__(self, cls, *args, **kwargs) -> None:
self.cls = cls
self.args = args
self.kwargs = kwargs
# def add_arg(self, arg):
# self.args += (arg,)
# def add_kwarg(self, key, value):
# self.kwargs[key] = value
def __call__(self) -> Any:
return self.cls(*self.args, **self.kwargs)
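# --- Illustrative sketch (added for exposition; not part of the original file) ---
# Deferred construction, using a standard-library class as a stand-in:
def _example_class_with_init_args():
    from collections import Counter
    ctor = ClassWithInitArgs(Counter, 'aab')
    counter = ctor()  # Counter('aab') is only constructed here
    assert counter['a'] == 2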
def check_workers_alive(workers: List, is_alive: Callable, gap_time: float = 1) -> None:
while True:
for worker in workers:
if not is_alive(worker):
logging.warning(f"worker {worker} is not alive" + " sending signal to main thread")
signal.raise_signal(signal.SIGABRT)
time.sleep(gap_time)
class WorkerGroup:
"""A group of workers"""
def __init__(self, resource_pool: ResourcePool, **kwargs) -> None:
self._is_init_with_detached_workers = resource_pool is None
if resource_pool is not None:
# initialize from a resource pool (rather than attaching to existing detached workers)
self._procecss_dispatch_config = resource_pool()
else:
self._procecss_dispatch_config = None
self._workers = []
self._worker_names = []
self._master_addr = None
self._master_port = None
self._checker_thread: threading.Thread = None
def _is_worker_alive(self, worker):
raise NotImplementedError(f"WorkerGroup._is_worker_alive called, should be implemented in derived class.")
def _block_until_all_workers_alive(self) -> None:
while True:
all_state = [self._is_worker_alive(worker) for worker in self._workers]
if not all(all_state):
time.sleep(1)
else:
break
def start_worker_aliveness_check(self, every_n_seconds=1) -> None:
# before starting checking worker aliveness, make sure all workers are already alive
self._block_until_all_workers_alive()
self._checker_thread = threading.Thread(target=check_workers_alive,
args=(self._workers, self._is_worker_alive, every_n_seconds))
self._checker_thread.start()
@property
def world_size(self):
return len(self._workers)
# execute_all_async and execute_rank_zero_async should be implemented by RayWorkerGroup and TorchRPCWorkerGroup;
# MegatronWorkerGroup and XperfWorkerGroup may skip them
def _bind_worker_method(self, user_defined_cls, func_generator):
"""
Bind the worker method to the WorkerGroup
"""
for method_name in dir(user_defined_cls):
try:
method = getattr(user_defined_cls, method_name)
assert callable(method), f"{method_name} in {user_defined_cls} is not callable"
except Exception as e:
# properties are skipped here: getattr on the class returns a non-callable descriptor, so the assert above raises
continue
if hasattr(method, MAGIC_ATTR):
# this method is decorated by register
attribute = getattr(method, MAGIC_ATTR)
                assert isinstance(attribute, Dict), f'attribute must be a dictionary. Got {type(attribute)}'
                assert 'dispatch_mode' in attribute, 'attribute must contain dispatch_mode as a key'
dispatch_mode = attribute['dispatch_mode']
execute_mode = attribute['execute_mode']
blocking = attribute['blocking']
# get dispatch fn
if isinstance(dispatch_mode, Dispatch):
# get default dispatch fn
fn = get_predefined_dispatch_fn(dispatch_mode=dispatch_mode)
dispatch_fn = fn['dispatch_fn']
collect_fn = fn['collect_fn']
else:
assert isinstance(dispatch_mode, dict)
assert 'dispatch_fn' in dispatch_mode
assert 'collect_fn' in dispatch_mode
dispatch_fn = dispatch_mode['dispatch_fn']
collect_fn = dispatch_mode['collect_fn']
# get execute_fn_name
execute_mode = get_predefined_execute_fn(execute_mode=execute_mode)
wg_execute_fn_name = execute_mode['execute_fn_name']
# get execute_fn from string
try:
execute_fn = getattr(self, wg_execute_fn_name)
assert callable(execute_fn), 'execute_fn must be callable'
except Exception as e:
print(f'execute_fn {wg_execute_fn_name} is invalid')
raise
# bind a new method to the RayWorkerGroup
func = func_generator(self,
method_name,
dispatch_fn=dispatch_fn,
collect_fn=collect_fn,
execute_fn=execute_fn,
blocking=blocking)
                try:
                    setattr(self, method_name, func)
                except Exception as e:
                    raise ValueError(f'Failed to set method_name {method_name}') from e
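# Illustrative sketch (not part of the original file), assuming the `register`
# decorator and `Dispatch` enum from verl.single_controller.base.decorator: a
# worker method decorated as below carries MAGIC_ATTR metadata, which
# _bind_worker_method consumes to attach a dispatch/execute/collect pipeline
# of the same name onto the WorkerGroup.
#
#     from verl.single_controller.base.decorator import register, Dispatch
#
#     class MyWorker(Worker):
#         @register(dispatch_mode=Dispatch.ONE_TO_ALL)
#         def ping(self):
#             return 'pong'
#
#     # after worker_group._bind_worker_method(MyWorker, func_generator),
#     # worker_group.ping() broadcasts to all workers and collects results.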
================================================
FILE: verl/single_controller/ray/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup, create_colocated_worker_cls
================================================
FILE: verl/single_controller/ray/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Dict, List, Any, Tuple
import ray
from ray.util import list_named_actors
from ray.util.placement_group import placement_group, PlacementGroup
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy, NodeAffinitySchedulingStrategy
from ray.experimental.state.api import get_actor
from verl.single_controller.base import WorkerGroup, ResourcePool, ClassWithInitArgs, Worker
__all__ = ['Worker']
def get_random_string(length: int) -> str:
import random
import string
letters_digits = string.ascii_letters + string.digits
return ''.join(random.choice(letters_digits) for _ in range(length))
def func_generator(self, method_name, dispatch_fn, collect_fn, execute_fn, blocking):
def func(*args, **kwargs):
args, kwargs = dispatch_fn(self, *args, **kwargs)
output = execute_fn(method_name, *args, **kwargs)
if blocking:
output = ray.get(output)
output = collect_fn(self, output)
return output
return func
class RayResourcePool(ResourcePool):
def __init__(self,
process_on_nodes: List[int] = None,
use_gpu: bool = True,
name_prefix: str = "",
max_colocate_count: int = 5,
detached=False) -> None:
super().__init__(process_on_nodes, max_colocate_count)
self.use_gpu = use_gpu
# print(f"in RayProcessDispatchConfiguration: name_prefix = {name_prefix}")
self.name_prefix = name_prefix
self.pgs = None
self.detached = detached
def get_placement_groups(self, strategy="STRICT_PACK", name=None):
if self.pgs is not None:
return self.pgs
pg_name_prefix = name if name else \
f"{self.name_prefix}verl_group_{'_'.join([str(count) for count in self._store])}:"
# print(f"pg_name_prefix = {pg_name_prefix}")
pg_scheme = [[{
"CPU": self.max_collocate_count,
"GPU": 1
} if self.use_gpu else {
"CPU": self.max_collocate_count
} for _ in range(process_count)] for process_count in self._store]
lifetime = 'detached' if self.detached else None
pgs = [
placement_group(bundles=bundles, strategy=strategy, name=pg_name_prefix + str(idx), lifetime=lifetime)
for idx, bundles in enumerate(pg_scheme)
]
ray.get([pg.ready() for pg in pgs])
self.pgs = pgs
return pgs
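# Illustrative sketch (not part of the original file): for _store = [2, 3],
# use_gpu=True and max_collocate_count=5, pg_scheme above expands to one
# placement group per node,
#     [[{'CPU': 5, 'GPU': 1}] * 2, [{'CPU': 5, 'GPU': 1}] * 3]
# i.e. one bundle per process; the STRICT_PACK strategy then pins all bundles
# of a group onto a single node.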
def extract_pg_from_exist(resource_pools: Dict[str, RayResourcePool], src_role_names: List[str],
                          resource_pool: RayResourcePool) -> List:
    src_pgs = [
        pg for role_name, src_pool in resource_pools.items() for pg in src_pool.get_placement_groups()
        if role_name in src_role_names
    ]
sorted_src_pgs = sorted(src_pgs, key=lambda pg: pg.bundle_count, reverse=True)
sorted_process_on_nodes = sorted([(val, idx) for idx, val in enumerate(resource_pool.store)], reverse=True)
unsorted_pgs: List[Tuple[int, PlacementGroup]] = []
searching_idx = 0
for request_process, original_idx in sorted_process_on_nodes:
        assert searching_idx < len(sorted_src_pgs), f"not enough nodes for request: searching the {searching_idx}-th node"
        assert request_process <= sorted_src_pgs[searching_idx].bundle_count, \
            f"requesting {request_process} processes, bundle count cannot satisfy"
unsorted_pgs.append((original_idx, sorted_src_pgs[searching_idx]))
searching_idx += 1
return [pg for _, pg in sorted(unsorted_pgs)]
def merge_resource_pool(rp1: RayResourcePool, rp2: RayResourcePool) -> RayResourcePool:
    assert rp1.use_gpu == rp2.use_gpu, 'Both RayResourcePools must agree on use_gpu'
    assert rp1.max_collocate_count == rp2.max_collocate_count, 'Both RayResourcePools must have the same max_collocate_count'
    assert rp1.n_gpus_per_node == rp2.n_gpus_per_node, 'Both RayResourcePools must have the same n_gpus_per_node'
assert rp1.detached == rp2.detached, 'Detached ResourcePool cannot be merged with non-detached ResourcePool'
new_store = rp1.store + rp2.store
merged = RayResourcePool(new_store, rp1.use_gpu, f"{rp1.name_prefix}_{rp2.name_prefix}")
merged.pgs = rp1.get_placement_groups() + rp2.get_placement_groups()
return merged
class RayClassWithInitArgs(ClassWithInitArgs):
def __init__(self, cls, *args, **kwargs) -> None:
# self._options = kwargs.pop('options', dict())
super().__init__(cls, *args, **kwargs)
self._options = {}
self._additional_resource = {}
def set_additional_resource(self, additional_resource):
self._additional_resource = additional_resource
def update_options(self, options: Dict):
self._options.update(options)
def __call__(self,
placement_group,
placement_group_bundle_idx,
use_gpu: bool = True,
num_gpus=1,
sharing_with=None) -> Any:
if sharing_with is not None:
target_node_id = ray.get(sharing_with.get_node_id.remote())
cuda_visible_devices = ray.get(sharing_with.get_cuda_visible_devices.remote())
options = {"scheduling_strategy": NodeAffinitySchedulingStrategy(node_id=target_node_id, soft=False)}
return self.cls.options(**options).remote(*self.args,
cuda_visible_devices=cuda_visible_devices,
**self.kwargs)
options = {
"scheduling_strategy":
PlacementGroupSchedulingStrategy(placement_group=placement_group,
placement_group_bundle_index=placement_group_bundle_idx)
}
options.update(self._options)
if use_gpu:
options["num_gpus"] = num_gpus
        if len(self._additional_resource) >= 1:
for k, v in self._additional_resource.items():
options[k] = v
# print("cls:", self.cls)
# print("args: ", self.args)
# print("kwargs: ", self.kwargs)
return self.cls.options(**options).remote(*self.args, **self.kwargs)
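# Illustrative sketch (not part of the original file), with a hypothetical
# SomeWorker class: one actor per placement-group bundle, using fractional
# GPUs so that colocated workers can share a device.
#
#     pool = RayResourcePool([8], use_gpu=True, max_colocate_count=2)
#     pg = pool.get_placement_groups()[0]
#     cia = RayClassWithInitArgs(ray.remote(SomeWorker))
#     actor = cia(placement_group=pg, placement_group_bundle_idx=0, num_gpus=0.5)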
class RayWorkerGroup(WorkerGroup):
def __init__(self,
resource_pool: RayResourcePool = None,
ray_cls_with_init: RayClassWithInitArgs = None,
bin_pack: bool = True,
name_prefix: str = None,
detached=False,
worker_names=None,
**kwargs) -> None:
super().__init__(resource_pool=resource_pool, **kwargs)
self.ray_cls_with_init = ray_cls_with_init
self.name_prefix = get_random_string(length=6) if name_prefix is None else name_prefix
if worker_names is not None:
assert self._is_init_with_detached_workers
self._worker_names = worker_names
if self._is_init_with_detached_workers:
self._init_with_detached_workers(worker_names=worker_names)
else:
self._init_with_resource_pool(resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
bin_pack=bin_pack,
detached=detached)
if ray_cls_with_init is not None:
self._bind_worker_method(self.ray_cls_with_init.cls, func_generator)
def _is_worker_alive(self, worker: ray.actor.ActorHandle):
worker_state_dict = get_actor(worker._actor_id.hex())
return worker_state_dict.get("state", "undefined") == "ALIVE" if worker_state_dict is not None else False
def _init_with_detached_workers(self, worker_names):
workers = [ray.get_actor(name=name) for name in worker_names]
self._workers = workers
self._world_size = len(worker_names)
def _init_with_resource_pool(self, resource_pool, ray_cls_with_init, bin_pack, detached):
use_gpu = resource_pool.use_gpu
strategy = "PACK"
if bin_pack:
strategy = "STRICT_PACK"
pgs = resource_pool.get_placement_groups(strategy=strategy)
world_size = resource_pool.world_size
self._world_size = world_size
# cia.add_kwarg("_world_size", world_size)
num_gpus = 1 / resource_pool.max_collocate_count
rank = -1
for pg_idx, local_world_size in enumerate(resource_pool.store):
pg = pgs[pg_idx]
            assert local_world_size <= pg.bundle_count, \
                f"when generating workers for {self.name_prefix}, placement group {pg_idx} has only " \
                f"{pg.bundle_count} bundles but {local_world_size} processes were requested"
for local_rank in range(local_world_size):
rank += 1
# we pass in environment variable at option so that Worker can use environment variable to set
env_vars = {
'WORLD_SIZE': str(world_size),
'RANK': str(rank),
'WG_PREFIX': self.name_prefix,
'WG_BACKEND': 'ray',
'RAY_LOCAL_WORLD_SIZE': str(local_world_size),
'RAY_LOCAL_RANK': str(local_rank),
}
if rank != 0:
env_vars['MASTER_ADDR'] = self._master_addr
env_vars['MASTER_PORT'] = self._master_port
import re
cia_name = type(ray_cls_with_init.cls).__name__
match = re.search(r"ActorClass\(([^)]+)\)", cia_name) # ray.remote(Obj) -> "ActorClass(Obj)"
cia_name = match.group(1) if match else cia_name # "ActorClass(Obj)" -> "Obj"
name = f"{self.name_prefix}{cia_name}_{pg_idx}:{local_rank}" # e.g. Worker_2:5
ray_cls_with_init.update_options({'runtime_env': {'env_vars': env_vars}, 'name': name})
if detached:
ray_cls_with_init.update_options({'lifetime': 'detached'})
# create a worker
worker = ray_cls_with_init(placement_group=pg,
placement_group_bundle_idx=local_rank,
use_gpu=use_gpu,
num_gpus=num_gpus)
self._workers.append(worker)
self._worker_names.append(name)
if rank == 0:
register_center_actor = None
for _ in range(120):
if f"{self.name_prefix}_register_center" not in list_named_actors():
time.sleep(1)
else:
register_center_actor = ray.get_actor(f"{self.name_prefix}_register_center")
break
assert register_center_actor is not None, f"failed to get register_center_actor: {self.name_prefix}_register_center in {list_named_actors(all_namespaces=True)}"
rank_zero_info = ray.get(register_center_actor.get_rank_zero_info.remote())
self._master_addr, self._master_port = rank_zero_info['MASTER_ADDR'], rank_zero_info['MASTER_PORT']
# print(f"rank_zero_info: {rank_zero_info}")
# print(f"master_addr: {self._master_addr}, master_port: {self._master_port}")
@property
def worker_names(self):
return self._worker_names
@classmethod
def from_detached(cls, worker_names=None, ray_cls_with_init=None):
worker_group = cls(resource_pool=None,
ray_cls_with_init=ray_cls_with_init,
name_prefix=None,
worker_names=worker_names)
return worker_group
    def spawn(self, prefix_set):
        """
        Spawn a dictionary of worker groups, each exposing the subset of methods that share one prefix.
        """
        def _rebind_actor_methods(worker_group, actor_name):
            """
            Bind the methods prefixed with actor_name back to their original names.
            """
            prefix: str = actor_name + '_'
for method_name in dir(worker_group):
if method_name.startswith(prefix):
# only valid when Python >= 3.9
original_method_name = method_name.removeprefix(prefix)
method = getattr(worker_group, method_name)
setattr(worker_group, original_method_name, method)
new_worker_group_dict = {}
for prefix in prefix_set:
new_worker_group = self.from_detached(worker_names=self._worker_names,
ray_cls_with_init=self.ray_cls_with_init)
_rebind_actor_methods(new_worker_group, prefix)
new_worker_group_dict[prefix] = new_worker_group
return new_worker_group_dict
def execute_rank_zero_sync(self, method_name: str, *args, **kwargs):
return ray.get(self.execute_rank_zero_async(method_name, *args, **kwargs))
def execute_rank_zero_async(self, method_name: str, *args, **kwargs):
remote_call = getattr(self._workers[0], method_name)
return remote_call.remote(*args, **kwargs)
def execute_rank_zero(self, method_name: str, *args, **kwargs):
return self.execute_rank_zero_async(method_name, *args, **kwargs)
def execute_all(self, method_name: str, *args, **kwargs):
return self.execute_all_async(method_name, *args, **kwargs)
def execute_all_sync(self, method_name: str, *args, **kwargs):
return ray.get(self.execute_all_async(method_name, *args, **kwargs))
def execute_all_async(self, method_name: str, *args, **kwargs):
# Here, we assume that if all arguments in args and kwargs are lists, and their lengths match len(self._workers),
# we'll distribute each element in these lists to the corresponding worker
# print(f"execute_all_async: method {method_name}({args}, {kwargs})")
length = len(self._workers)
if all(isinstance(arg, list) for arg in args) and all(isinstance(kwarg, list) for kwarg in kwargs.values()):
if all(len(arg) == length for arg in args) and all(len(kwarg) == length for kwarg in kwargs.values()):
# print(f"splitting args and kwargs into {length} shards")
result = []
for i in range(length):
sliced_args = tuple(arg[i] for arg in args)
sliced_kwargs = {k: v[i] for k, v in kwargs.items()}
remote_call = getattr(self._workers[i], method_name)
result.append(remote_call.remote(*sliced_args, **sliced_kwargs))
return result
return [getattr(worker, method_name).remote(*args, **kwargs) for worker in self._workers]
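    # Illustrative sketch (not part of the original file): with 2 workers,
    #     wg.execute_all_async('foo', [1, 2], x=[3, 4])
    # shards element-wise into worker[0].foo.remote(1, x=3) and
    # worker[1].foo.remote(2, x=4); any other argument shape broadcasts the
    # identical call to every worker.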
@property
def master_address(self):
return self._master_addr
@property
def master_port(self):
return self._master_port
@property
def workers(self):
return self._workers
@property
def world_size(self):
return self._world_size
"""
Utilities that enables creating workers inside the same ray.Actor,
with code written in separate ray.Actors.
"""
from unittest.mock import patch
from verl.single_controller.base.decorator import MAGIC_ATTR
import os
def _bind_workers_method_to_parent(cls, key, user_defined_cls):
"""
Binds the methods of each worker to the WorkerDict.
Note that we only bind public methods that are decorated by register
"""
for method_name in dir(user_defined_cls):
try:
method = getattr(user_defined_cls, method_name)
assert callable(method), f"{method_name} in {user_defined_cls} is not callable"
except Exception as e:
# if it is a property, it will fail because Class doesn't have instance property
continue
if hasattr(method, MAGIC_ATTR):
def generate_function(name):
def func(self, *args, **kwargs):
# dispatch to the actual worker
return getattr(self.worker_dict[key], name)(*args, **kwargs)
return func
func = generate_function(method_name)
# pass MAGIC_ATTR for outer worker group
setattr(func, MAGIC_ATTR, getattr(method, MAGIC_ATTR))
            try:
                method_name_with_prefix = key + '_' + method_name
                setattr(cls, method_name_with_prefix, func)
                # print(f'Binding {method_name_with_prefix}')
            except Exception as e:
                raise ValueError(f'Failed to set method_name {method_name}') from e
def _unwrap_ray_remote(cls):
if hasattr(cls, '__ray_actor_class__'):
cls = cls.__ray_actor_class__
return cls
def create_colocated_worker_cls(class_dict: dict[str, RayClassWithInitArgs]):
    """
    Return a RayClassWithInitArgs wrapping a WorkerDict class whose instances
    delegate calls to every cls in class_dict.
    """
cls_dict = {}
init_args_dict = {}
worker_cls = None
for key, cls in class_dict.items():
        if worker_cls is None:
            worker_cls = cls.cls.__ray_actor_class__.__base__
        else:
            assert worker_cls == cls.cls.__ray_actor_class__.__base__, \
                'the worker class should be the same when sharing the same process'
cls_dict[key] = cls.cls
init_args_dict[key] = {'args': cls.args, 'kwargs': cls.kwargs}
assert cls_dict.keys() == init_args_dict.keys()
# TODO: create a class with customizable name
class WorkerDict(worker_cls):
def __init__(self):
super().__init__()
self.worker_dict = {}
for key, user_defined_cls in cls_dict.items():
user_defined_cls = _unwrap_ray_remote(user_defined_cls)
# directly instantiate the class without remote
with patch.dict(os.environ, {'DISABLE_WORKER_INIT': '1'}):
self.worker_dict[key] = user_defined_cls(*init_args_dict[key].get('args', ()),
**init_args_dict[key].get('kwargs', {}))
# now monkey-patch the methods from inner class to WorkerDict
for key, user_defined_cls in cls_dict.items():
user_defined_cls = _unwrap_ray_remote(user_defined_cls)
_bind_workers_method_to_parent(WorkerDict, key, user_defined_cls)
remote_cls = ray.remote(WorkerDict)
remote_cls = RayClassWithInitArgs(cls=remote_cls)
return remote_cls
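# Illustrative sketch (not part of the original file), with hypothetical
# Actor/Critic worker classes deriving from the same Worker base:
#
#     class_dict = {
#         'actor': RayClassWithInitArgs(ray.remote(Actor), config=actor_cfg),
#         'critic': RayClassWithInitArgs(ray.remote(Critic), config=critic_cfg),
#     }
#     colocated_cls = create_colocated_worker_cls(class_dict)
#     wg = RayWorkerGroup(resource_pool, colocated_cls)
#     spawned = wg.spawn(prefix_set=class_dict.keys())
#     # spawned['actor'] and spawned['critic'] now share the same processes.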
================================================
FILE: verl/single_controller/ray/megatron.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import ray
from .base import RayWorkerGroup, RayResourcePool, RayClassWithInitArgs
from verl.single_controller.base.megatron.worker import DistRankInfo, DistGlobalInfo
from verl.single_controller.base.megatron.worker_group import MegatronWorkerGroup
# NOTE(sgm): for open-source megatron-core
class NVMegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup):
"""
MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup
so that the dispatcher can use it to dispatch data.
"""
def __init__(self, resource_pool: RayResourcePool, ray_cls_with_init: RayClassWithInitArgs, **kwargs):
super().__init__(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init, **kwargs)
self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name='get_megatron_rank_info')
self._megatron_global_info: DistGlobalInfo = ray.get(
self.execute_rank_zero_async(method_name='get_megatron_global_info'))
class MegatronRayWorkerGroup(RayWorkerGroup, MegatronWorkerGroup):
"""
MegatronWorkerGroup will query each worker of its megatron rank info and store it inside the WorkerGroup
so that the dispatcher can use it to dispatch data.
"""
def __init__(self,
resource_pool: RayResourcePool,
ray_cls_with_init: RayClassWithInitArgs,
default_megatron_kwargs: Dict = None,
**kwargs):
super().__init__(resource_pool=resource_pool,
ray_cls_with_init=ray_cls_with_init,
default_megatron_kwargs=default_megatron_kwargs,
**kwargs)
self.init_megatron(default_megatron_kwargs=default_megatron_kwargs)
self._megatron_rank_info: DistRankInfo = self.execute_all_sync(method_name='get_megatron_rank_info')
self._megatron_global_info: DistGlobalInfo = ray.get(
self.execute_rank_zero_async(method_name='get_megatron_global_info'))
def init_megatron(self, default_megatron_kwargs: Optional[Dict] = None):
# after super, we will call init of each worker
if not self._is_init_with_detached_workers:
# only init_megatron if the WorkerGroup is created from scratch
self.execute_all_sync(method_name='init_megatron', default_megatron_kwargs=default_megatron_kwargs)
================================================
FILE: verl/third_party/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib.metadata import version, PackageNotFoundError
from packaging import version as vs
def get_version(pkg):
try:
return version(pkg)
except PackageNotFoundError:
return None
package_name = 'vllm'
package_version = get_version(package_name)
vllm_version = None
if package_version == '0.3.1':
vllm_version = '0.3.1'
from .vllm_v_0_3_1.llm import LLM
from .vllm_v_0_3_1.llm import LLMEngine
from .vllm_v_0_3_1 import parallel_state
elif package_version == '0.4.2':
vllm_version = '0.4.2'
from .vllm_v_0_4_2.llm import LLM
from .vllm_v_0_4_2.llm import LLMEngine
from .vllm_v_0_4_2 import parallel_state
elif package_version == '0.5.4':
vllm_version = '0.5.4'
from .vllm_v_0_5_4.llm import LLM
from .vllm_v_0_5_4.llm import LLMEngine
from .vllm_v_0_5_4 import parallel_state
elif package_version == '0.6.3':
vllm_version = '0.6.3'
from .vllm_v_0_6_3.llm import LLM
from .vllm_v_0_6_3.llm import LLMEngine
from .vllm_v_0_6_3 import parallel_state
elif package_version is not None and vs.parse(package_version) >= vs.parse('0.6.6.post2.dev252+g8027a724'):
# From 0.6.6.post2 on, vllm supports SPMD inference
# See https://github.com/vllm-project/vllm/pull/12071
from vllm import LLM
from vllm.distributed import parallel_state
from .vllm_spmd.dtensor_weight_loaders import load_dtensor_weights
else:
    raise ValueError(
        f'vllm version {package_version} not supported. Currently supported versions are 0.3.1, 0.4.2, 0.5.4, 0.6.3 and >= 0.6.6.post2'
    )
================================================
FILE: verl/third_party/vllm/vllm_spmd/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/vllm_spmd/dtensor_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
from typing import Dict
import torch.nn as nn
from torch.distributed._tensor import DTensor
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.utils import is_pp_missing_parameter
def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
for param_name, shard_name, shard_id in stacked_params_mapping:
if shard_name not in name:
continue
stacked_name = name.replace(shard_name, param_name)
# Skip loading extra bias for GPTQ models.
if stacked_name.endswith(".bias") and stacked_name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[stacked_name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# lm_head is not used in vllm as it is tied with embed_token.
# To prevent errors, skip loading lm_head.weight.
if "lm_head.weight" in name:
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
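# Illustrative sketch (not part of the original file): the
# stacked_params_mapping rename in action. vLLM fuses q/k/v into a single
# qkv_proj parameter, so a checkpoint entry like
#     "model.layers.0.self_attn.q_proj.weight"
# is looked up as
#     "model.layers.0.self_attn.qkv_proj.weight"
# and passed to that parameter's weight_loader with shard_id "q", which
# copies it into the correct slice of the fused tensor.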
def gptbigcode_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
continue
if ".attn.bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def starcoder2_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def llama_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
(".qkv_proj", ".q_proj", "q"),
(".qkv_proj", ".k_proj", "k"),
(".qkv_proj", ".v_proj", "v"),
(".gate_up_proj", ".gate_proj", 0),
(".gate_up_proj", ".up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
# Models trained using ColossalAI may include these tensors in
# the checkpoint. Skip them.
continue
# With tie_word_embeddings, we can skip lm_head.weight
# The weight might appear unnecessarily in the files if the model is
# processed with quantization, LoRA, fine-tuning, etc.
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
                weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def qwen2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def qwen2vl_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
if "visual" in name:
continue
name = "language_model." + name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if "visual" in name:
name = name
else:
name = "language_model." + name
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
from vllm.model_executor.layers.fused_moe import FusedMoE
def deepseekv2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
# Params for weights, fp8 weight scales, fp8 activation scales
# (param_name, weight_name, expert_id, shard_id)
expert_params_mapping = FusedMoE.make_expert_params_mapping(
ckpt_gate_proj_name="gate_proj",
ckpt_down_proj_name="down_proj",
ckpt_up_proj_name="up_proj",
num_experts=vllm_model.config.n_routed_experts,
)
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
# Skip non-stacked layers and experts (experts handled below).
if weight_name not in name:
continue
# We have mlp.experts[0].gate_proj in the checkpoint.
# Since we handle the experts below in expert_params_mapping,
# we need to skip here BEFORE we update the name, otherwise
# name will be updated to mlp.experts[0].gate_up_proj, which
# will then be updated below in expert_params_mapping
# for mlp.experts[0].gate_gate_up_proj, which breaks load.
if ("mlp.experts." in name) and name not in params_dict:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
for mapping in expert_params_mapping:
param_name, weight_name, expert_id, shard_id = mapping
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(
param,
local_loaded_weight.to(dtype=param.dtype),
weight_name,
shard_id=shard_id,
expert_id=expert_id,
)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gpt2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
    # A silent no-op here would leave the rollout weights stale, so fail loudly.
    raise NotImplementedError("dtensor weight loading for GPT2 is not implemented yet")
def redistribute_dtensor(param_name: str, loaded_weights: DTensor, parallelize_plan: Dict = None):
param_name = _process_parameter_names(name=param_name)
if parallelize_plan is not None:
assert (
param_name
in parallelize_plan.keys()), f"param name: {param_name} not in parallelize_plan :{parallelize_plan.keys()}"
placement = parallelize_plan[param_name]
local_loaded_weights = loaded_weights.redistribute(device_mesh=loaded_weights.device_mesh,
placements=placement).to_local()
else:
local_loaded_weights = loaded_weights.full_tensor()
return local_loaded_weights
def _process_parameter_names(name):
# Remove '.weight' if it exists at the end of the string
if name.endswith(".weight"):
name = name[:-7]
# Remove 'model.layers.x.' or 'model.' prefix
if "model.layers" in name:
parts = name.split(".")
# Reconstruct the string without 'model.layers.x.'
name = ".".join(parts[3:]) # parts[0] is 'model', parts[1] is 'layers', parts[2] is 'x'
elif name.startswith("model."):
name = name[6:] # Remove 'model.'
return name
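# Illustrative sketch (not part of the original file): how parameter names
# are normalized before any parallelize_plan lookup.
def _process_parameter_names_example():
    assert _process_parameter_names("model.layers.0.self_attn.q_proj.weight") == "self_attn.q_proj"
    assert _process_parameter_names("model.embed_tokens.weight") == "embed_tokens"
    assert _process_parameter_names("lm_head.weight") == "lm_head"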
__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
"GPT2LMHeadModel": gpt2_dtensor_weight_loader,
"LlamaForCausalLM": llama_dtensor_weight_loader,
"LLaMAForCausalLM": llama_dtensor_weight_loader,
"MistralForCausalLM": llama_dtensor_weight_loader, # mistral is the same as llama in vLLM
"InternLMForCausalLM": llama_dtensor_weight_loader,
"AquilaModel": llama_dtensor_weight_loader,
"AquilaForCausalLM": llama_dtensor_weight_loader,
"Phi3ForCausalLM": llama_dtensor_weight_loader,
"GemmaForCausalLM": gemma_dtensor_weight_loader,
"Gemma2ForCausalLM": gemma_dtensor_weight_loader,
"GPTBigCodeForCausalLM": gptbigcode_dtensor_load_weights,
"Starcoder2ForCausalLM": starcoder2_dtensor_load_weights,
"Qwen2ForCausalLM": qwen2_dtensor_weight_loader,
"DeepseekV2ForCausalLM": deepseekv2_dtensor_weight_loader,
"Qwen2VLForConditionalGeneration": qwen2vl_dtensor_weight_loader,
"Qwen2_5_VLForConditionalGeneration": qwen2vl_dtensor_weight_loader,
}
# the actor model is .state_dict()
# Load dtensor weights
def load_dtensor_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
    # NOTE(sgm): to reduce peak memory usage, we offload the vllm model to CPU
    # after init, so we need to move it back to GPU after syncing the model
    # weights in the first iteration.
    vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__:
return __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__.keys()}")
# NOTE(sgm): we use a per-parameter weight loader in each vllm sub-module
def update_dtensor_weight_loader():
pass
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/arg_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/arg_utils.py
import argparse
import dataclasses
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import torch.nn as nn
from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig)
from transformers import PretrainedConfig
from .config import ModelConfig
@dataclass
class EngineArgs:
"""Arguments for vLLM engine."""
model_hf_config: PretrainedConfig = None
dtype: str = 'auto'
kv_cache_dtype: str = 'auto'
seed: int = 0
max_model_len: Optional[int] = None
worker_use_ray: bool = False
pipeline_parallel_size: int = 1
tensor_parallel_size: int = 1
max_parallel_loading_workers: Optional[int] = None
block_size: int = 16
swap_space: int = 4 # GiB
gpu_memory_utilization: float = 0.90
max_num_batched_tokens: Optional[int] = None
max_num_seqs: int = 256
max_paddings: int = 256
disable_log_stats: bool = False
revision: Optional[str] = None
tokenizer_revision: Optional[str] = None
quantization: Optional[str] = None
load_format: str = 'model'
enforce_eager: bool = False
max_context_len_to_capture: int = 8192
disable_custom_all_reduce: bool = False
enable_lora: bool = False
max_loras: int = 1
max_lora_rank: int = 16
lora_extra_vocab_size: int = 256
    lora_dtype: str = 'auto'
max_cpu_loras: Optional[int] = None
device: str = 'cuda'
@staticmethod
def add_cli_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Shared CLI arguments for vLLM engine."""
# Model arguments
# TODO(shengguangming): delete the unused args
parser.add_argument('--model',
type=str,
default='facebook/opt-125m',
help='name or path of the huggingface model to use')
        parser.add_argument('--tokenizer',
                            type=str,
                            default=None,
                            help='name or path of the huggingface tokenizer to use')
parser.add_argument('--revision',
type=str,
default=None,
help='the specific model version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
parser.add_argument('--tokenizer-revision',
type=str,
default=None,
help='the specific tokenizer version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default='auto',
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
parser.add_argument('--trust-remote-code', action='store_true', help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=None,
                            help='directory to download and load the weights, '
                            'defaults to the default cache dir of '
                            'huggingface')
parser.add_argument('--load-format',
type=str,
default=EngineArgs.load_format,
choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
help='The format of the model weights to load. '
'"auto" will try to load the weights in the safetensors format '
'and fall back to the pytorch bin format if safetensors format '
'is not available. '
'"pt" will load the weights in the pytorch bin format. '
'"safetensors" will load the weights in the safetensors format. '
'"npcache" will load the weights in pytorch format and store '
'a numpy cache to speed up the loading. '
'"dummy" will initialize the weights with random values, '
'which is mainly for profiling.')
parser.add_argument('--dtype',
type=str,
default=EngineArgs.dtype,
choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
help='data type for model weights and activations. '
'The "auto" option will use FP16 precision '
'for FP32 and FP16 models, and BF16 precision '
'for BF16 models.')
parser.add_argument('--max-model-len',
type=int,
default=None,
help='model context length. If unspecified, '
'will be automatically derived from the model.')
# Parallel arguments
parser.add_argument('--worker-use-ray',
action='store_true',
help='use Ray for distributed serving, will be '
'automatically set when using more than 1 GPU')
parser.add_argument('--pipeline-parallel-size',
'-pp',
type=int,
default=EngineArgs.pipeline_parallel_size,
help='number of pipeline stages')
parser.add_argument('--tensor-parallel-size',
'-tp',
type=int,
default=EngineArgs.tensor_parallel_size,
help='number of tensor parallel replicas')
# KV cache arguments
parser.add_argument('--block-size',
type=int,
default=EngineArgs.block_size,
choices=[8, 16, 32],
help='token block size')
# TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
parser.add_argument('--seed', type=int, default=EngineArgs.seed, help='random seed')
parser.add_argument('--swap-space',
type=int,
default=EngineArgs.swap_space,
help='CPU swap space size (GiB) per GPU')
        parser.add_argument('--gpu-memory-utilization',
                            type=float,
                            default=EngineArgs.gpu_memory_utilization,
                            help='the fraction of GPU memory to be used for '
                            'the model executor')
parser.add_argument('--max-num-batched-tokens',
type=int,
default=EngineArgs.max_num_batched_tokens,
help='maximum number of batched tokens per '
'iteration')
parser.add_argument('--max-num-seqs',
type=int,
default=EngineArgs.max_num_seqs,
help='maximum number of sequences per iteration')
parser.add_argument('--disable-log-stats', action='store_true', help='disable logging statistics')
# Quantization settings.
parser.add_argument('--quantization',
'-q',
type=str,
choices=['awq', None],
default=None,
help='Method used to quantize the weights')
return parser
@classmethod
def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
# Get the list of attributes of this dataclass.
attrs = [attr.name for attr in dataclasses.fields(cls)]
# Set the attributes from the parsed arguments.
engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
return engine_args
    def create_engine_configs(
        self,
    ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig, DeviceConfig, Optional[LoRAConfig]]:
device_config = DeviceConfig(self.device)
model_config = ModelConfig(self.model_hf_config, self.dtype, self.seed, self.load_format, self.revision,
self.tokenizer_revision, self.max_model_len, self.quantization, self.enforce_eager,
self.max_context_len_to_capture)
cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization, self.swap_space, self.kv_cache_dtype,
model_config.get_sliding_window())
parallel_config = ParallelConfig(self.pipeline_parallel_size, self.tensor_parallel_size, self.worker_use_ray,
self.max_parallel_loading_workers, self.disable_custom_all_reduce)
scheduler_config = SchedulerConfig(self.max_num_batched_tokens, self.max_num_seqs, model_config.max_model_len,
self.max_paddings)
lora_config = LoRAConfig(max_lora_rank=self.max_lora_rank,
max_loras=self.max_loras,
lora_extra_vocab_size=self.lora_extra_vocab_size,
lora_dtype=self.lora_dtype,
max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras and self.max_cpu_loras > 0 else
None) if self.enable_lora else None
return (model_config, cache_config, parallel_config, scheduler_config, device_config, lora_config)
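# Illustrative sketch (not part of the original file); the model name is a
# placeholder. Unlike upstream vLLM, this EngineArgs takes an in-memory HF
# config rather than a model path:
#
#     from transformers import AutoConfig
#     hf_config = AutoConfig.from_pretrained('some/hf-model')
#     engine_args = EngineArgs(model_hf_config=hf_config, tensor_parallel_size=2)
#     (model_config, cache_config, parallel_config,
#      scheduler_config, device_config, lora_config) = engine_args.create_engine_configs()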
@dataclass
class AsyncEngineArgs(EngineArgs):
"""Arguments for asynchronous vLLM engine."""
engine_use_ray: bool = False
disable_log_requests: bool = False
max_log_len: Optional[int] = None
@staticmethod
def add_cli_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
parser = EngineArgs.add_cli_args(parser)
parser.add_argument('--engine-use-ray',
action='store_true',
help='use Ray to start the LLM engine in a '
'separate process as the server process.')
parser.add_argument('--disable-log-requests', action='store_true', help='disable logging requests')
parser.add_argument('--max-log-len',
type=int,
default=None,
help='max number of prompt characters or prompt '
'ID numbers being printed in log. '
'Default: unlimited.')
return parser
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/config.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/config.py
from typing import Optional, Union, ClassVar
from dataclasses import dataclass
import torch
from transformers import PretrainedConfig
from packaging.version import Version
from vllm.logger import init_logger
from vllm.transformers_utils.config import get_config
from vllm.utils import get_cpu_memory, is_hip, get_nvcc_cuda_version
logger = init_logger(__name__)
_GB = 1 << 30
class ModelConfig:
"""Configuration for the model.
Args:
model: Name or path of the huggingface model to use.
tokenizer: Name or path of the huggingface tokenizer to use.
tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
dtype: Data type for model weights and activations. The "auto" option
will use FP16 precision for FP32 and FP16 models, and BF16 precision
for BF16 models.
seed: Random seed for reproducibility.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id. If unspecified, will use the default
version.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id. If unspecified, will use
the default version.
max_model_len: Maximum length of a sequence (including prompt and
output). If None, will be derived from the model.
quantization: Quantization method that was used to quantize the model
weights. If None, we assume the model weights are not quantized.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode.
"""
def __init__(
self,
hf_config: PretrainedConfig,
dtype: str,
seed: int,
load_format: str = 'model',
revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
max_model_len: Optional[int] = None,
quantization: Optional[str] = None,
trust_remote_code: Optional[bool] = True,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
) -> None:
self.model = hf_config._name_or_path
self.tokenizer = hf_config._name_or_path
self.load_format = load_format
self.seed = seed
self.revision = revision
self.tokenizer_revision = tokenizer_revision
self.quantization = quantization
self.trust_remote_code = trust_remote_code
self.enforce_eager = enforce_eager
self.max_context_len_to_capture = max_context_len_to_capture
# self.hf_config = get_config(model, trust_remote_code, revision)
self.hf_config = hf_config
self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
self.max_model_len = _get_and_verify_max_len(self.hf_config, max_model_len)
# self._verify_load_format()
# self._verify_tokenizer_mode()
self._verify_quantization()
self._verify_cuda_graph()
def _verify_load_format(self) -> None:
load_format = self.load_format.lower()
if load_format not in ["auto", "pt", "safetensors", "npcache", "dummy", "model"]:
raise ValueError(f"Unknown load format: {self.load_format}. Must be one of "
"'auto', 'pt', 'safetensors', 'npcache', 'dummy' or 'model'.")
self.load_format = load_format
# def _verify_tokenizer_mode(self) -> None:
# tokenizer_mode = self.tokenizer_mode.lower()
# if tokenizer_mode not in ["auto", "slow"]:
# raise ValueError(
# f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
# "either 'auto' or 'slow'.")
# self.tokenizer_mode = tokenizer_mode
def _verify_quantization(self) -> None:
supported_quantization = ["awq", "gptq", "squeezellm"]
rocm_not_supported_quantization = ["awq", "gptq"]
if self.quantization is not None:
self.quantization = self.quantization.lower()
# Parse quantization method from the HF model config, if available.
hf_quant_config = getattr(self.hf_config, "quantization_config", None)
if hf_quant_config is not None:
hf_quant_method = str(hf_quant_config["quant_method"]).lower()
if self.quantization is None:
self.quantization = hf_quant_method
elif self.quantization != hf_quant_method:
raise ValueError("Quantization method specified in the model config "
f"({hf_quant_method}) does not match the quantization "
f"method specified in the `quantization` argument "
f"({self.quantization}).")
if self.quantization is not None:
if self.quantization not in supported_quantization:
raise ValueError(f"Unknown quantization method: {self.quantization}. Must "
f"be one of {supported_quantization}.")
if is_hip() and self.quantization in rocm_not_supported_quantization:
raise ValueError(f"{self.quantization} quantization is currently not supported "
f"in ROCm.")
logger.warning(f"{self.quantization} quantization is not fully "
"optimized yet. The speed can be slower than "
"non-quantized models.")
def _verify_cuda_graph(self) -> None:
if self.max_context_len_to_capture is None:
self.max_context_len_to_capture = self.max_model_len
self.max_context_len_to_capture = min(self.max_context_len_to_capture, self.max_model_len)
if (self.quantization in ["gptq", "squeezellm"] and not self.enforce_eager):
# Related issue: https://github.com/vllm-project/vllm/issues/2147
logger.warning(f"{self.quantization} does not support CUDA graph "
"yet. Disabling CUDA graph.")
self.enforce_eager = True
def verify_with_parallel_config(
self,
parallel_config: "ParallelConfig",
) -> None:
total_num_attention_heads = self.hf_config.num_attention_heads
tensor_parallel_size = parallel_config.tensor_parallel_size
if total_num_attention_heads % tensor_parallel_size != 0:
raise ValueError(f"Total number of attention heads ({total_num_attention_heads})"
" must be divisible by tensor parallel size "
f"({tensor_parallel_size}).")
total_num_hidden_layers = self.hf_config.num_hidden_layers
pipeline_parallel_size = parallel_config.pipeline_parallel_size
if total_num_hidden_layers % pipeline_parallel_size != 0:
raise ValueError(f"Total number of hidden layers ({total_num_hidden_layers}) "
"must be divisible by pipeline parallel size "
f"({pipeline_parallel_size}).")
def get_sliding_window(self) -> Optional[int]:
return getattr(self.hf_config, "sliding_window", None)
def get_vocab_size(self) -> int:
return self.hf_config.vocab_size
def get_hidden_size(self) -> int:
return self.hf_config.hidden_size
def get_head_size(self) -> int:
# FIXME(woosuk): This may not be true for all models.
return self.hf_config.hidden_size // self.hf_config.num_attention_heads
def get_total_num_kv_heads(self) -> int:
"""Returns the total number of KV heads."""
# For GPTBigCode & Falcon:
# NOTE: for falcon, when new_decoder_architecture is True, the
# multi_query flag is ignored and we use n_head_kv for the number of
# KV heads.
falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
new_decoder_arch_falcon = (self.hf_config.model_type in falcon_model_types and
getattr(self.hf_config, "new_decoder_architecture", False))
if not new_decoder_arch_falcon and getattr(self.hf_config, "multi_query", False):
# Multi-query attention, only one KV head.
# Currently, tensor parallelism is not supported in this case.
return 1
attributes = [
# For Falcon:
"n_head_kv",
"num_kv_heads",
# For LLaMA-2:
"num_key_value_heads",
# For ChatGLM:
"multi_query_group_num",
]
for attr in attributes:
num_kv_heads = getattr(self.hf_config, attr, None)
if num_kv_heads is not None:
return num_kv_heads
# For non-grouped-query attention models, the number of KV heads is
# equal to the number of attention heads.
return self.hf_config.num_attention_heads
def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
"""Returns the number of KV heads per GPU."""
total_num_kv_heads = self.get_total_num_kv_heads()
# If tensor parallelism is used, we divide the number of KV heads by
# the tensor parallel size. We will replicate the KV heads in the
# case where the number of KV heads is smaller than the tensor
# parallel size so each GPU has at least one KV head.
return max(1, total_num_kv_heads // parallel_config.tensor_parallel_size)
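# NOTE (illustration): a sketch of the KV-head split with hypothetical numbers:
#     total_kv_heads = 8
#     for tp_size in (2, 8, 16):
#         per_gpu = max(1, total_kv_heads // tp_size)  # -> 4, 1, 1
# When tp_size exceeds total_kv_heads, heads are replicated so that every GPU
# still holds at least one KV head.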
def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
total_num_hidden_layers = self.hf_config.num_hidden_layers
return total_num_hidden_layers // parallel_config.pipeline_parallel_size
class CacheConfig:
"""Configuration for the KV cache.
Args:
block_size: Size of a cache block in number of tokens.
gpu_memory_utilization: Fraction of GPU memory to use for the
vLLM execution.
swap_space: Size of the CPU swap space per GPU (in GiB).
cache_dtype: Data type for kv cache storage.
"""
def __init__(
self,
block_size: int,
gpu_memory_utilization: float,
swap_space: int,
cache_dtype: str,
sliding_window: Optional[int] = None,
) -> None:
self.block_size = block_size
self.gpu_memory_utilization = gpu_memory_utilization
self.swap_space_bytes = swap_space * _GB
self.cache_dtype = cache_dtype
self.sliding_window = sliding_window
self._verify_args()
self._verify_cache_dtype()
# Will be set after profiling.
self.num_gpu_blocks = None
self.num_cpu_blocks = None
def _verify_args(self) -> None:
if self.gpu_memory_utilization > 1.0:
raise ValueError("GPU memory utilization cannot exceed 1.0. Got "
f"{self.gpu_memory_utilization}.")
def _verify_cache_dtype(self) -> None:
if self.cache_dtype == "auto":
pass
elif self.cache_dtype == "fp8_e5m2":
nvcc_cuda_version = get_nvcc_cuda_version()
if nvcc_cuda_version < Version("11.8"):
raise ValueError("FP8 is not supported when cuda version is lower than 11.8.")
device_name = torch.cuda.get_device_name()
if "AMD" in device_name:
raise NotImplementedError("FP8_E5M2 KV Cache on AMD GPU has not been supported yet.")
logger.info("Using fp8_e5m2 data type to store kv cache. It reduces "
"the GPU memory footprint and boosts the performance. "
"But it may cause slight accuracy drop. "
"Currently we only support fp8 without scaling factors and "
"make e5m2 as a default format.")
else:
raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")
def verify_with_parallel_config(
self,
parallel_config: "ParallelConfig",
) -> None:
total_cpu_memory = get_cpu_memory()
# FIXME(woosuk): Here, it is assumed that the GPUs in a tensor parallel
# group are in the same node. However, the GPUs may span multiple nodes.
num_gpus_per_node = parallel_config.tensor_parallel_size
cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node
msg = (f"{cpu_memory_usage / _GB:.2f} GiB out of "
f"the {total_cpu_memory / _GB:.2f} GiB total CPU memory is "
"allocated for the swap space.")
if cpu_memory_usage > 0.7 * total_cpu_memory:
raise ValueError("Too large swap space. " + msg)
elif cpu_memory_usage > 0.4 * total_cpu_memory:
logger.warning("Possibly too large swap space. " + msg)
class ParallelConfig:
"""Configuration for the distributed execution.
Args:
pipeline_parallel_size: Number of pipeline parallel groups.
tensor_parallel_size: Number of tensor parallel groups.
worker_use_ray: Whether to use Ray for model workers. Will be set to
True if either pipeline_parallel_size or tensor_parallel_size is
greater than 1.
max_parallel_loading_workers: Maximum number of workers used to load
the model in parallel batches, to avoid CPU RAM OOM when using
tensor parallelism with large models.
disable_custom_all_reduce: Disable the custom all-reduce kernel and
fall back to NCCL.
"""
def __init__(
self,
pipeline_parallel_size: int,
tensor_parallel_size: int,
worker_use_ray: bool,
max_parallel_loading_workers: Optional[int] = None,
disable_custom_all_reduce: bool = False,
) -> None:
self.pipeline_parallel_size = pipeline_parallel_size
self.tensor_parallel_size = tensor_parallel_size
self.worker_use_ray = worker_use_ray
self.max_parallel_loading_workers = max_parallel_loading_workers
self.disable_custom_all_reduce = disable_custom_all_reduce
self.world_size = pipeline_parallel_size * tensor_parallel_size
if self.world_size > 1:
self.worker_use_ray = True
self._verify_args()
def _verify_args(self) -> None:
if self.pipeline_parallel_size > 1:
raise NotImplementedError("Pipeline parallelism is not supported yet.")
if not self.disable_custom_all_reduce and self.world_size > 1:
if is_hip():
self.disable_custom_all_reduce = True
logger.info("Disabled the custom all-reduce kernel because it is not "
"supported on AMD GPUs.")
elif self.pipeline_parallel_size > 1:
self.disable_custom_all_reduce = True
logger.info("Disabled the custom all-reduce kernel because it is not "
"supported with pipeline parallelism.")
# FIXME(woosuk): Fix the stability issues and re-enable the custom
# all-reduce kernel.
if not self.disable_custom_all_reduce and self.world_size > 1:
self.disable_custom_all_reduce = True
logger.info("Custom all-reduce kernels are temporarily disabled due to "
"stability issues. We will re-enable them once the issues are "
"resolved.")
class SchedulerConfig:
"""Scheduler configuration.
Args:
max_num_batched_tokens: Maximum number of tokens to be processed in
a single iteration.
max_num_seqs: Maximum number of sequences to be processed in a single
iteration.
max_model_len: Maximum length of a sequence (including prompt
and generated text).
max_paddings: Maximum number of paddings to be added to a batch.
"""
def __init__(
self,
max_num_batched_tokens: Optional[int],
max_num_seqs: int,
max_model_len: int,
max_paddings: int,
) -> None:
if max_num_batched_tokens is not None:
self.max_num_batched_tokens = max_num_batched_tokens
else:
# If max_model_len is too short, use 2048 as the default value for
# higher throughput.
self.max_num_batched_tokens = max(max_model_len, 2048)
self.max_num_seqs = max_num_seqs
self.max_model_len = max_model_len
self.max_paddings = max_paddings
self._verify_args()
def _verify_args(self) -> None:
if self.max_num_batched_tokens < self.max_model_len:
raise ValueError(f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
f"smaller than max_model_len ({self.max_model_len}). "
"This effectively limits the maximum sequence length to "
"max_num_batched_tokens and makes vLLM reject longer "
"sequences. Please increase max_num_batched_tokens or "
"decrease max_model_len.")
if self.max_num_batched_tokens < self.max_num_seqs:
raise ValueError(f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
"be greater than or equal to max_num_seqs "
f"({self.max_num_seqs}).")
class DeviceConfig:
def __init__(self, device: str = "cuda") -> None:
self.device = torch.device(device)
@dataclass
class LoRAConfig:
max_lora_rank: int
max_loras: int
max_cpu_loras: Optional[int] = None
lora_dtype: Optional[torch.dtype] = None
lora_extra_vocab_size: int = 256
# This is a constant.
lora_vocab_padding_size: ClassVar[int] = 256
def __post_init__(self):
# Keep this in sync with csrc/punica/bgmv/bgmv_config.h
possible_max_ranks = (8, 16, 32, 64)
possible_lora_extra_vocab_size = (0, 256, 512)
if self.max_lora_rank not in possible_max_ranks:
raise ValueError(f"max_lora_rank ({self.max_lora_rank}) must be one of "
f"{possible_max_ranks}.")
if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
raise ValueError(f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
f"must be one of {possible_lora_extra_vocab_size}.")
if self.max_loras < 1:
raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
if self.max_cpu_loras is None:
self.max_cpu_loras = self.max_loras
elif self.max_cpu_loras < self.max_loras:
raise ValueError(f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
f"max_loras ({self.max_loras})")
def verify_with_model_config(self, model_config: ModelConfig):
if self.lora_dtype in (None, "auto"):
self.lora_dtype = model_config.dtype
elif isinstance(self.lora_dtype, str):
self.lora_dtype = getattr(torch, self.lora_dtype)
if model_config.quantization is not None:
raise ValueError("LoRA is not supported with quantized models yet.")
def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
if scheduler_config.max_num_batched_tokens > 65528:
raise ValueError("Due to limitations of the custom LoRA CUDA kernel, "
"max_num_batched_tokens must be <= 65528 when "
"LoRA is enabled.")
_STR_DTYPE_TO_TORCH_DTYPE = {
"half": torch.float16,
"float16": torch.float16,
"float": torch.float32,
"float32": torch.float32,
"bfloat16": torch.bfloat16,
}
_ROCM_NOT_SUPPORTED_DTYPE = ["float", "float32"]
def _get_and_verify_dtype(
config: PretrainedConfig,
dtype: Union[str, torch.dtype],
) -> torch.dtype:
# NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
# because config.torch_dtype can be None.
config_dtype = getattr(config, "torch_dtype", None)
if config_dtype is None:
config_dtype = torch.float32
if isinstance(dtype, str):
dtype = dtype.lower()
if dtype == "auto":
if config_dtype == torch.float32:
# Following the common practice, we use float16 for float32
# models.
torch_dtype = torch.float16
else:
torch_dtype = config_dtype
else:
if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
raise ValueError(f"Unknown dtype: {dtype}")
torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
elif isinstance(dtype, torch.dtype):
torch_dtype = dtype
else:
raise ValueError(f"Unknown dtype: {dtype}")
if is_hip() and torch_dtype == torch.float32:
rocm_supported_dtypes = [
k for k, v in _STR_DTYPE_TO_TORCH_DTYPE.items() if (k not in _ROCM_NOT_SUPPORTED_DTYPE)
]
raise ValueError(f"dtype \'{dtype}\' is not supported in ROCm. "
f"Supported dtypes are {rocm_supported_dtypes}")
# Verify the dtype.
if torch_dtype != config_dtype:
if torch_dtype == torch.float32:
# Upcasting to float32 is allowed.
pass
elif config_dtype == torch.float32:
# Downcasting from float32 to float16 or bfloat16 is allowed.
pass
else:
# Casting between float16 and bfloat16 is allowed with a warning.
logger.warning(f"Casting {config_dtype} to {torch_dtype}.")
return torch_dtype
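# NOTE (illustration): dtype resolution for some hypothetical inputs:
#     config.torch_dtype == torch.float32,  dtype="auto" -> torch.float16
#     config.torch_dtype == torch.bfloat16, dtype="auto" -> torch.bfloat16
#     config.torch_dtype == torch.bfloat16, dtype="half" -> torch.float16 (with a warning)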
def _get_and_verify_max_len(
hf_config: PretrainedConfig,
max_model_len: Optional[int],
) -> int:
"""Get and verify the model's maximum length."""
derived_max_model_len = float("inf")
possible_keys = [
# OPT
"max_position_embeddings",
# GPT-2
"n_positions",
# MPT
"max_seq_len",
# ChatGLM2
"seq_length",
# Others
"max_sequence_length",
"max_seq_length",
"seq_len",
]
# Track which config key produced the limit, for the error message below.
max_len_key = None
for key in possible_keys:
max_len = getattr(hf_config, key, None)
if max_len is not None and max_len < derived_max_model_len:
max_len_key = key
derived_max_model_len = max_len
if derived_max_model_len == float("inf"):
if max_model_len is not None:
# If max_model_len is specified, we use it.
return max_model_len
default_max_len = 2048
logger.warning("The model's config.json does not contain any of the following "
"keys to determine the original maximum length of the model: "
f"{possible_keys}. Assuming the model's maximum length is "
f"{default_max_len}.")
derived_max_model_len = default_max_len
rope_scaling = getattr(hf_config, "rope_scaling", None)
if rope_scaling is not None:
assert "factor" in rope_scaling
scaling_factor = rope_scaling["factor"]
if rope_scaling["type"] == "yarn":
derived_max_model_len = rope_scaling["original_max_position_embeddings"]
derived_max_model_len *= scaling_factor
if max_model_len is None:
max_model_len = derived_max_model_len
elif max_model_len > derived_max_model_len:
raise ValueError(f"User-specified max_model_len ({max_model_len}) is greater than "
f"the derived max_model_len ({max_len_key}={derived_max_model_len}"
" in model's config.json). This may lead to incorrect model "
"outputs or CUDA errors. Make sure the value is correct and "
"within the model context size.")
return int(max_model_len)
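# NOTE (illustration): max_model_len derivation with rope scaling, using
# hypothetical config values:
#     # config.json: max_position_embeddings = 4096,
#     #              rope_scaling = {"type": "linear", "factor": 2.0}
#     # -> derived_max_model_len = 4096 * 2.0 = 8192
#     # With {"type": "yarn", "factor": 4.0,
#     #       "original_max_position_embeddings": 2048}
#     # -> derived_max_model_len = 2048 * 4.0 = 8192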
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/llm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py
from typing import Dict, List, Optional, Tuple, Union
from tqdm import tqdm
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers import PretrainedConfig
import torch.nn as nn
from .arg_utils import EngineArgs
from .llm_engine_sp import LLMEngine
from vllm.lora.request import LoRARequest
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.utils import Counter
import torch
from torch.nn.utils.rnn import pad_sequence
from verl.workers.rollout.tokenizer import HybridEngineBaseTokenizer
class LLM:
"""An LLM for generating texts from given prompts and sampling parameters.
This class includes a tokenizer, a language model (possibly distributed
across multiple GPUs), and GPU memory space allocated for intermediate
states (aka KV cache). Given a batch of prompts and sampling parameters,
this class generates texts from the model, using an intelligent batching
mechanism and efficient memory management.
NOTE: This class is intended to be used for offline inference. For online
serving, use the `AsyncLLMEngine` class instead.
NOTE: For the comprehensive list of arguments, see `EngineArgs`.
Args:
model: A HuggingFace Transformers model instance.
tokenizer: A HuggingFace Transformers tokenizer instance.
tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
if available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
tensor_parallel_size: The number of GPUs to use for distributed
execution with tensor parallelism.
dtype: The data type for the model weights and activations. Currently,
we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
the `torch_dtype` attribute specified in the model config file.
However, if the `torch_dtype` in the config is `float32`, we will
use `float16` instead.
quantization: The method used to quantize the model weights. Currently,
we support "awq". If None, we assume the model weights are not
quantized and use `dtype` to determine the data type of the weights.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id.
seed: The seed to initialize the random number generator for sampling.
gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
reserve for the model weights, activations, and KV cache. Higher
values will increase the KV cache size and thus improve the model's
throughput. However, if the value is too high, it may cause out-of-
memory (OOM) errors.
swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
This can be used for temporarily storing the states of the requests
when their `best_of` sampling parameters are larger than 1. If all
requests will have `best_of=1`, you can safely set this to 0.
Otherwise, too small values may cause out-of-memory (OOM) errors.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode.
disable_custom_all_reduce: See ParallelConfig
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer],
model_hf_config: PretrainedConfig,
tokenizer_mode: str = "auto",
trust_remote_code: bool = False,
tensor_parallel_size: int = 1,
dtype: str = "auto",
quantization: Optional[str] = None,
revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
seed: int = 0,
gpu_memory_utilization: float = 0.9,
swap_space: int = 4,
enforce_eager: bool = False,
max_context_len_to_capture: int = 8192,
disable_custom_all_reduce: bool = False,
**kwargs,
) -> None:
if "disable_log_stats" not in kwargs:
kwargs["disable_log_stats"] = True
engine_args = EngineArgs(
model_hf_config=model_hf_config,
tensor_parallel_size=tensor_parallel_size,
dtype=dtype,
quantization=quantization,
revision=revision,
tokenizer_revision=tokenizer_revision,
seed=seed,
gpu_memory_utilization=gpu_memory_utilization,
swap_space=swap_space,
enforce_eager=enforce_eager,
max_context_len_to_capture=max_context_len_to_capture,
disable_custom_all_reduce=disable_custom_all_reduce,
**kwargs,
)
tokenizer_cls = (PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer)
if not isinstance(tokenizer, tokenizer_cls):
raise ValueError(
f"Unexpected tokenizer type: {type(tokenizer)}. Must be"
"one of the following: PreTrainedTokenizer, PreTrainedTokenizerFast, verl.workers.rollout.HybridEngineBaseTokenizer"
)
self.llm_engine = LLMEngine.from_engine_args(model, tokenizer, engine_args)
self.request_counter = Counter()
def init_cache_engine(self):
self.llm_engine.init_cache_engine()
def free_cache_engine(self):
self.llm_engine.free_cache_engine()
def get_tokenizer(self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
return self.llm_engine.tokenizer
def set_tokenizer(
self,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
) -> None:
self.llm_engine.tokenizer = tokenizer
def generate(
self,
prompts: Optional[Union[str, List[str]]] = None,
sampling_params: Optional[SamplingParams] = None,
prompt_token_ids: Optional[List[List[int]]] = None,
prefix_pos: Optional[Union[int, List[int]]] = None,
use_tqdm: bool = True,
lora_request: Optional[LoRARequest] = None,
) -> List[RequestOutput]:
"""Generates the completions for the input prompts.
NOTE: This class automatically batches the given prompts, considering
the memory constraint. For the best performance, put all of your prompts
into a single list and pass it to this method.
Args:
prompts: A list of prompts to generate completions for.
sampling_params: The sampling parameters for text generation. If
None, we use the default sampling parameters.
prompt_token_ids: A list of token IDs for the prompts. If None, we
use the tokenizer to convert the prompts to token IDs.
prefix_pos: If not None, the given position is used as the prefix
position for each prompt (see `LLMEngine.add_request` for details).
use_tqdm: Whether to use tqdm to display the progress bar.
lora_request: LoRA request to use for generation, if any.
Returns:
A list of `RequestOutput` objects containing the generated
completions in the same order as the input prompts.
"""
if prompts is None and prompt_token_ids is None:
raise ValueError("Either prompts or prompt_token_ids must be "
"provided.")
if isinstance(prompts, str):
# Convert a single prompt to a list.
prompts = [prompts]
if prompts is not None and prompt_token_ids is not None:
if len(prompts) != len(prompt_token_ids):
raise ValueError("The lengths of prompts and prompt_token_ids "
"must be the same.")
if sampling_params is None:
# Use default sampling params.
sampling_params = SamplingParams()
# Add requests to the engine.
num_requests = len(prompts) if prompts is not None else len(prompt_token_ids)
for i in range(num_requests):
prompt = prompts[i] if prompts is not None else None
prefix_pos_i = prefix_pos[i] if prefix_pos is not None else None
token_ids = None if prompt_token_ids is None else prompt_token_ids[i]
if not isinstance(token_ids, list):
# NOTE(shengguangming): convert the rollout input into List[int]
token_ids = self._pre_process_inputs(token_ids)
self._add_request(prompt, sampling_params, token_ids, lora_request=lora_request, prefix_pos=prefix_pos_i)
return self._run_engine(use_tqdm)
def _add_request(
self,
prompt: Optional[str],
sampling_params: SamplingParams,
prompt_token_ids: Optional[List[int]],
lora_request: Optional[LoRARequest] = None,
prefix_pos: Optional[int] = None,
) -> None:
request_id = str(next(self.request_counter))
self.llm_engine.add_request(request_id,
prompt,
sampling_params,
prompt_token_ids,
lora_request=lora_request,
prefix_pos=prefix_pos)
def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]:
# Initialize tqdm.
if use_tqdm:
num_requests = self.llm_engine.get_num_unfinished_requests()
pbar = tqdm(total=num_requests, desc="Processed prompts")
# Run the engine.
outputs: List[RequestOutput] = []
while self.llm_engine.has_unfinished_requests():
step_outputs = self.llm_engine.step()
for output in step_outputs:
if output.finished:
outputs.append(output)
if use_tqdm:
pbar.update(1)
if use_tqdm:
pbar.close()
# Sort the outputs by request ID.
# This is necessary because some requests may be finished earlier than
# its previous requests.
outputs = sorted(outputs, key=lambda x: int(x.request_id))
# TODO(shengguangming): maybe we can hack the autoregressive logic instead of only applying post-processing, for better performance
return self._post_process_outputs(outputs)
# NOTE(shengguangming): add for verl
# TODO(sgm): we can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(self, prompt_token_ids: torch.Tensor) -> List[int]:
# remove the left padding in the prompt token_id
pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
token_ids = prompt_token_ids[non_pad_index:].tolist()
return token_ids
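# NOTE (illustration): what _pre_process_inputs does to a left-padded prompt,
# with hypothetical token ids (pad_token_id = 0):
#     prompt_token_ids = torch.tensor([0, 0, 0, 15, 42, 7])
#     # first non-pad index is 3 -> token_ids == [15, 42, 7]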
# NOTE(shengguangming): add for verl
def _post_process_outputs(self, outputs: List[RequestOutput]) -> Tuple[torch.Tensor, torch.Tensor]:
output_token_ids = []
logprobs = []
for request_output in outputs: # List[RequestOutput]
for completion in request_output.outputs: # List[CompletionOutput], usually len == 1
output_token_ids.append(torch.tensor(completion.token_ids))
# TODO(shengguangming): can be optimized by rewriting the Sampler._get_logprobs() logic
logprobs_dicts = completion.logprobs
if logprobs_dicts is not None:
logprob = []
for logprobs_dict, token_id in zip(logprobs_dicts, completion.token_ids):
logprob.append(logprobs_dict[token_id])
logprobs.append(torch.tensor(logprob))
pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
output_token_ids = pad_sequence(output_token_ids, batch_first=True, padding_value=pad_token_id)
if len(logprobs) > 0:
logprobs = pad_sequence(logprobs, batch_first=True, padding_value=pad_token_id)
return output_token_ids, logprobs
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor]) -> None:
self.llm_engine.sync_model_weights(actor_weights=actor_weights)
def offload_model_weights(self) -> None:
self.llm_engine.offload_model_weights()
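# NOTE (illustration): a minimal offline-inference sketch with this verl-adapted
# LLM class; the model, tokenizer and config names below are hypothetical:
#     llm = LLM(model=actor_module, tokenizer=hf_tokenizer,
#               model_hf_config=hf_config, tensor_parallel_size=1)
#     llm.init_cache_engine()
#     token_ids, logprobs = llm.generate(
#         prompt_token_ids=batched_prompt_ids,  # left-padded prompt tensors
#         sampling_params=SamplingParams(temperature=1.0, logprobs=1))
#     llm.free_cache_engine()  # release the KV cache between rollouts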
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/llm_engine_sp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/llm_engine.py
import os
import socket
import time
import torch
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union
from vllm.lora.request import LoRARequest
from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig)
from vllm.core.scheduler import Scheduler, SchedulerOutputs
from vllm.logger import init_logger
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.sequence import (SamplerOutput, Sequence, SequenceGroup, SequenceGroupMetadata, SequenceGroupOutput,
SequenceOutput, SequenceStatus)
from vllm.transformers_utils.tokenizer import detokenize_incrementally
from vllm.engine.metrics import StatLogger, Stats
from vllm.utils import Counter
import torch.nn as nn
from .arg_utils import EngineArgs
from .tokenizer import TokenizerGroup
logger = init_logger(__name__)
_LOCAL_LOGGING_INTERVAL_SEC = 5
class LLMEngine:
"""An LLM engine that receives requests and generates texts.
This is the main class for the vLLM engine. It receives requests
from clients and generates texts from the LLM. It includes a tokenizer, a
language model (possibly distributed across multiple GPUs), and GPU memory
space allocated for intermediate states (aka KV cache). This class utilizes
iteration-level scheduling and efficient memory management to maximize the
serving throughput.
The `LLM` class wraps this class for offline batched inference and the
`AsyncLLMEngine` class wraps this class for online serving.
NOTE: The config arguments are derived from the `EngineArgs` class. For the
comprehensive list of arguments, see `EngineArgs`.
Args:
model_config: The configuration related to the LLM model.
cache_config: The configuration related to the KV cache memory
management.
parallel_config: The configuration related to distributed execution.
scheduler_config: The configuration related to the request scheduler.
distributed_init_method: The initialization method for distributed
execution. See `torch.distributed.init_process_group` for details.
placement_group: Ray placement group for distributed execution.
Required for distributed execution.
log_stats: Whether to log statistics.
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: nn.Module,
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
distributed_init_method: str,
placement_group: Optional[None],
log_stats: bool,
) -> None:
logger.info("Initializing an LLM engine with config: "
f"model={model_config.model!r}, "
f"tokenizer={model_config.tokenizer!r}, "
# f"tokenizer_mode={model_config.tokenizer_mode}, "
f"revision={model_config.revision}, "
f"tokenizer_revision={model_config.tokenizer_revision}, "
# f"trust_remote_code={model_config.trust_remote_code}, "
f"dtype={model_config.dtype}, "
f"max_seq_len={model_config.max_model_len}, "
# f"download_dir={model_config.download_dir!r}, "
# f"load_format={model_config.load_format}, "
f"disable_custom_all_reduce={parallel_config.disable_custom_all_reduce}, "
f"tensor_parallel_size={parallel_config.tensor_parallel_size}, "
f"quantization={model_config.quantization}, "
f"seed={model_config.seed})")
# TODO(woosuk): Print more configs in debug mode.
self.model_config = model_config # TODO: currently this is the hf config
self.cache_config = cache_config
self.lora_config = lora_config
assert self.cache_config.sliding_window == getattr(self.model_config.hf_config, "sliding_window", None)
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.log_stats = log_stats
self._verify_args()
# self.model = model # should not store the model, it should be deleted
# TODO(shengguangming): maybe we can choose init here or from arguments
self._init_tokenizer(tokenizer)
self.seq_counter = Counter()
# Create the parallel GPU workers.
self._init_workers_sp(model, distributed_init_method)
# Profile the memory usage and initialize the cache.
self._init_cache_sp()
# Create the scheduler.
# NOTE(shengguangming): each process will have independent scheduler
self.scheduler = Scheduler(scheduler_config, cache_config, lora_config)
# Metric Logging.
if self.log_stats:
self.stat_logger = StatLogger(local_interval=_LOCAL_LOGGING_INTERVAL_SEC)
# Logging.
self.last_logging_time = 0.0
# List of (timestamp, num_tokens)
self.num_prompt_tokens: List[Tuple[float, int]] = []
# List of (timestamp, num_tokens)
self.num_generation_tokens: List[Tuple[float, int]] = []
def _init_tokenizer(self, tokenizer, **tokenizer_init_kwargs):
init_kwargs = dict(enable_lora=bool(self.lora_config),
max_num_seqs=self.scheduler_config.max_num_seqs,
max_input_length=None)
init_kwargs.update(tokenizer_init_kwargs)
self.tokenizer: TokenizerGroup = TokenizerGroup(tokenizer, **init_kwargs)
# TODO: check get_lora_tokenizer func
def get_tokenizer_for_seq(self, sequence: Sequence):
return self.tokenizer.get_lora_tokenizer(sequence.lora_request)
def _init_workers_sp(self, model, distributed_init_method: str):
# Lazy import the Worker to avoid importing torch.cuda/xformers
# before CUDA_VISIBLE_DEVICES is set in the Worker
from .worker import Worker # pylint: disable=import-outside-toplevel
rank = int(os.getenv("RANK"))
self.worker = Worker(
model,
self.model_config,
self.parallel_config,
self.scheduler_config,
self.device_config,
rank,
distributed_init_method,
lora_config=self.lora_config,
kv_cache_dtype=self.cache_config.cache_dtype,
)
# NOTE(shengguangming): torch.distributed.init_process_group will be called inside the init_model()
self.worker.init_model()
self.worker.load_model()
def _verify_args(self) -> None:
self.model_config.verify_with_parallel_config(self.parallel_config)
self.cache_config.verify_with_parallel_config(self.parallel_config)
def _init_cache_sp(self) -> None:
"""Profiles the memory usage and initializes the KV cache."""
# Get the maximum number of blocks that can be allocated on GPU and CPU.
num_blocks = self.worker.profile_num_available_blocks(
block_size=self.cache_config.block_size,
gpu_memory_utilization=self.cache_config.gpu_memory_utilization,
cpu_swap_space=self.cache_config.swap_space_bytes,
cache_dtype=self.cache_config.cache_dtype,
)
# NOTE(shengguangming): Now we don't use a shared centralized controller; each process will
# have its own scheduler
num_gpu_blocks = num_blocks[0]
num_cpu_blocks = num_blocks[1]
# FIXME(woosuk): Change to debug log.
logger.info(f"# GPU blocks: {num_gpu_blocks}, "
f"# CPU blocks: {num_cpu_blocks}")
if num_gpu_blocks <= 0:
raise ValueError("No available memory for the cache blocks. "
"Try increasing `gpu_memory_utilization` when "
"initializing the engine.")
max_seq_len = self.cache_config.block_size * num_gpu_blocks
if self.model_config.max_model_len > max_seq_len:
raise ValueError(f"The model's max seq len ({self.model_config.max_model_len}) "
"is larger than the maximum number of tokens that can be "
f"stored in KV cache ({max_seq_len}). Try increasing "
"`gpu_memory_utilization` or decreasing `max_model_len` when "
"initializing the engine.")
self.cache_config.num_gpu_blocks = num_gpu_blocks
self.cache_config.num_cpu_blocks = num_cpu_blocks
# Initialize the cache.
self.worker.init_cache_engine(cache_config=self.cache_config)
self.worker.warm_up_model()
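# NOTE (illustration): the capacity check above, with hypothetical numbers:
#     block_size = 16, num_gpu_blocks = 2048
#     max_seq_len = 16 * 2048 = 32768  # tokens storable in the GPU KV cache
#     # A model_config.max_model_len of 40960 would raise; 32768 or less passes.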
def init_cache_engine(self):
self.worker.init_cache_engine(cache_config=self.cache_config)
def free_cache_engine(self):
self.worker.free_cache_engine()
@classmethod
def from_engine_args(cls, model, tokenizer, engine_args: EngineArgs) -> "LLMEngine":
"""Creates an LLM engine from the engine arguments."""
# Create the engine configs.
engine_configs = engine_args.create_engine_configs()
parallel_config = engine_configs[2]
# Initialize the cluster.
distributed_init_method, placement_group = initialize_cluster(parallel_config)
# Create the LLM engine.
engine = cls(model,
tokenizer,
*engine_configs,
distributed_init_method,
placement_group,
log_stats=not engine_args.disable_log_stats)
return engine
def add_request(
self,
request_id: str,
prompt: Optional[str],
sampling_params: SamplingParams,
prompt_token_ids: Optional[List[int]] = None,
arrival_time: Optional[float] = None,
lora_request: Optional[LoRARequest] = None,
prefix_pos: Optional[int] = None,
) -> None:
"""Add a request to the engine's request pool.
The request is added to the request pool and will be processed by the
scheduler as `engine.step()` is called. The exact scheduling policy is
determined by the scheduler.
Args:
request_id: The unique ID of the request.
prompt: The prompt string. Can be None if prompt_token_ids is
provided.
sampling_params: The sampling parameters for text generation.
prompt_token_ids: The token IDs of the prompt. If None, we
use the tokenizer to convert the prompts to token IDs.
arrival_time: The arrival time of the request. If None, we use
the current monotonic time.
prefix_pos: If not None, we use the given position as the prefix
position for each prompt. We will cache the prefix's KV
cache and reuse it for the next request with the same prefix.
This is an experimental feature, and may be replaced with
automatic prefix caching in the future.
Details:
- Set arrival_time to the current time if it is None.
- Set prompt_token_ids to the encoded prompt if it is None.
- Create `best_of` number of :class:`~vllm.Sequence` objects.
- Create a :class:`~vllm.SequenceGroup` object
from the list of :class:`~vllm.Sequence`.
- Add the :class:`~vllm.SequenceGroup` object to the scheduler.
Example:
>>> # initialize engine
>>> engine = LLMEngine.from_engine_args(engine_args)
>>> # set request arguments
>>> example_prompt = "Who is the president of the United States?"
>>> sampling_params = SamplingParams(temperature=0.0)
>>> request_id = 0
>>>
>>> # add the request to the engine
>>> engine.add_request(
>>> str(request_id),
>>> example_prompt,
>>> SamplingParams(temperature=0.0))
>>> # continue the request processing
>>> ...
"""
if lora_request is not None and not self.lora_config:
raise ValueError(f"Got lora_request {lora_request} but LoRA is "
"not enabled!")
if arrival_time is None:
arrival_time = time.monotonic()
if prompt_token_ids is None:
assert prompt is not None
prompt_token_ids = self.tokenizer.encode(prompt)
# Create the sequences.
block_size = self.cache_config.block_size
seq_id = next(self.seq_counter)
seq = Sequence(seq_id, prompt, prompt_token_ids, block_size, lora_request)
# Check whether the input specifies prefix
prefix = self.scheduler.prefix_pool.add_or_get_prefix(prompt_token_ids[:prefix_pos], lora_request.lora_int_id if
lora_request else 0) if prefix_pos is not None else None
# Create the sequence group.
seq_group = SequenceGroup(request_id, [seq], sampling_params, arrival_time, lora_request, prefix)
# Add the sequence group to the scheduler.
self.scheduler.add_seq_group(seq_group)
def abort_request(self, request_id: Union[str, Iterable[str]]) -> None:
"""Aborts a request(s) with the given ID.
Args:
request_id: The ID(s) of the request to abort.
Details:
- Refer to the
:meth:`~vllm.core.scheduler.Scheduler.abort_seq_group`
from class :class:`~vllm.core.scheduler.Scheduler`.
Example:
>>> # initialize engine and add a request with request_id
>>> request_id = str(0)
>>> # abort the request
>>> engine.abort_request(request_id)
"""
self.scheduler.abort_seq_group(request_id)
def get_model_config(self) -> ModelConfig:
"""Gets the model configuration."""
return self.model_config
def get_num_unfinished_requests(self) -> int:
"""Gets the number of unfinished requests."""
return self.scheduler.get_num_unfinished_seq_groups()
def has_unfinished_requests(self) -> bool:
"""Returns True if there are unfinished requests."""
return self.scheduler.has_unfinished_seqs()
def _check_beam_search_early_stopping(
self,
early_stopping: Union[bool, str],
sampling_params: SamplingParams,
best_running_seq: Sequence,
current_worst_seq: Sequence,
) -> bool:
assert sampling_params.use_beam_search
length_penalty = sampling_params.length_penalty
if early_stopping is True:
return True
current_worst_score = (current_worst_seq.get_beam_search_score(
length_penalty=length_penalty, eos_token_id=self.get_tokenizer_for_seq(current_worst_seq).eos_token_id))
if early_stopping is False:
highest_attainable_score = (best_running_seq.get_beam_search_score(
length_penalty=length_penalty, eos_token_id=self.get_tokenizer_for_seq(best_running_seq).eos_token_id))
else:
assert early_stopping == "never"
if length_penalty > 0.0:
# If length_penalty > 0.0, beam search will prefer longer
# sequences. The highest attainable score calculation is
# based on the longest possible sequence length in this case.
max_possible_length = max(best_running_seq.get_prompt_len() + sampling_params.max_tokens,
self.scheduler_config.max_model_len)
highest_attainable_score = (best_running_seq.get_beam_search_score(
length_penalty=length_penalty,
eos_token_id=self.get_tokenizer_for_seq(best_running_seq).eos_token_id,
seq_len=max_possible_length))
else:
# Otherwise, beam search will prefer shorter sequences. The
# highest attainable score calculation is based on the current
# sequence length.
highest_attainable_score = (best_running_seq.get_beam_search_score(
length_penalty=length_penalty,
eos_token_id=self.get_tokenizer_for_seq(best_running_seq).eos_token_id))
return current_worst_score >= highest_attainable_score
def _process_sequence_group_outputs(self, seq_group: SequenceGroup, outputs: SequenceGroupOutput) -> None:
# Process prompt logprobs
prompt_logprobs = outputs.prompt_logprobs
if prompt_logprobs is not None:
seq_group.prompt_logprobs = prompt_logprobs
# Process samples
samples = outputs.samples
parent_seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
existing_finished_seqs = seq_group.get_finished_seqs()
parent_child_dict = {parent_seq.seq_id: [] for parent_seq in parent_seqs}
for sample in samples:
parent_child_dict[sample.parent_seq_id].append(sample)
# List of (child, parent)
child_seqs: List[Tuple[Sequence, Sequence]] = []
# Process the child samples for each parent sequence
for parent in parent_seqs:
child_samples: List[SequenceOutput] = parent_child_dict[parent.seq_id]
if len(child_samples) == 0:
# This parent sequence has no children samples. Remove
# the parent sequence from the sequence group since it will
# not be used in the future iterations.
parent.status = SequenceStatus.FINISHED_ABORTED
seq_group.remove(parent.seq_id)
self.scheduler.free_seq(parent)
continue
# Fork the parent sequence if there are multiple child samples.
for child_sample in child_samples[:-1]:
new_child_seq_id = next(self.seq_counter)
child = parent.fork(new_child_seq_id)
child.append_token_id(child_sample.output_token, child_sample.logprobs)
child_seqs.append((child, parent))
# Continue the parent sequence for the last child sample.
# We reuse the parent sequence here to reduce redundant memory
# copies, especially when using non-beam search sampling methods.
last_child_sample = child_samples[-1]
parent.append_token_id(last_child_sample.output_token, last_child_sample.logprobs)
child_seqs.append((parent, parent))
for seq, _ in child_seqs:
# self._decode_sequence(seq, seq_group.sampling_params)
self._check_stop(seq, seq_group.sampling_params)
# Non-beam search case
if not seq_group.sampling_params.use_beam_search:
# For newly created child sequences, add them to the sequence group
# and fork them in block manager if they are not finished.
for seq, parent in child_seqs:
if seq is not parent:
seq_group.add(seq)
if not seq.is_finished():
self.scheduler.fork_seq(parent, seq)
# Free the finished and selected parent sequences' memory in block
# manager. Keep them in the sequence group as candidate output.
# NOTE: we need to fork the new sequences before freeing the
# old sequences.
for seq, parent in child_seqs:
if seq is parent and seq.is_finished():
self.scheduler.free_seq(seq)
return
# Beam search case
# Select the child sequences to keep in the sequence group.
selected_child_seqs = []
unselected_child_seqs = []
beam_width = seq_group.sampling_params.best_of
length_penalty = seq_group.sampling_params.length_penalty
# Select the newly finished sequences with the highest scores
# to replace existing finished sequences.
# Tuple of (seq, parent, is_new)
existing_finished_seqs = [(seq, None, False) for seq in existing_finished_seqs]
new_finished_seqs = [(seq, parent, True) for seq, parent in child_seqs if seq.is_finished()]
all_finished_seqs = existing_finished_seqs + new_finished_seqs
# Sort the finished sequences by their scores.
all_finished_seqs.sort(key=lambda x: x[0].get_beam_search_score(
length_penalty=length_penalty, eos_token_id=self.get_tokenizer_for_seq(x[0]).eos_token_id),
reverse=True)
for seq, parent, is_new in all_finished_seqs[:beam_width]:
if is_new:
# A newly generated child sequence finishes and has a high
# score, so we will add it into the sequence group.
selected_child_seqs.append((seq, parent))
for seq, parent, is_new in all_finished_seqs[beam_width:]:
if is_new:
# A newly generated child sequence finishes but has a low
# score, so we will not add it into the sequence group.
# Additionally, if this sequence is a continuation of a
# parent sequence, we will need to remove the parent sequence
# from the sequence group.
unselected_child_seqs.append((seq, parent))
else:
# An existing finished sequence has a low score, so we will
# remove it from the sequence group.
seq_group.remove(seq.seq_id)
# select the top beam_width sequences from the running
# sequences for the next iteration to continue the beam
# search.
running_child_seqs = [(seq, parent) for seq, parent in child_seqs if not seq.is_finished()]
# Sort the running sequences by their scores.
running_child_seqs.sort(key=lambda x: x[0].get_beam_search_score(
length_penalty=length_penalty, eos_token_id=self.get_tokenizer_for_seq(x[0]).eos_token_id),
reverse=True)
# Check if we can stop the beam search.
if len(running_child_seqs) == 0:
# No running sequences, stop the beam search.
stop_beam_search = True
elif len(all_finished_seqs) < beam_width:
# Not enough finished sequences, continue the beam search.
stop_beam_search = False
else:
# Check the early stopping criteria
best_running_seq = running_child_seqs[0][0]
current_worst_seq = all_finished_seqs[beam_width - 1][0]
stop_beam_search = self._check_beam_search_early_stopping(seq_group.sampling_params.early_stopping,
seq_group.sampling_params, best_running_seq,
current_worst_seq)
if stop_beam_search:
# Stop the beam search and remove all the running sequences from
# the sequence group.
unselected_child_seqs.extend(running_child_seqs)
else:
# Continue the beam search and select the top beam_width sequences
# to continue the beam search.
selected_child_seqs.extend(running_child_seqs[:beam_width])
# The remaining running sequences will not be used in the next
# iteration. Again, if these sequences are continuations of
# parent sequences, we will need to remove the parent sequences
# from the sequence group.
unselected_child_seqs.extend(running_child_seqs[beam_width:])
# For newly created child sequences, add them to the sequence group
# and fork them in block manager if they are not finished.
for seq, parent in selected_child_seqs:
if seq is not parent:
seq_group.add(seq)
if not seq.is_finished():
self.scheduler.fork_seq(parent, seq)
# Free the finished and selected parent sequences' memory in block
# manager. Keep them in the sequence group as candidate output.
for seq, parent in selected_child_seqs:
if seq is parent and seq.is_finished():
self.scheduler.free_seq(seq)
# Remove the unselected parent sequences from the sequence group and
# free their memory in block manager.
for seq, parent in unselected_child_seqs:
if seq is parent:
# Remove the parent sequence if it is not selected for next
# iteration
seq_group.remove(seq.seq_id)
self.scheduler.free_seq(seq)
def _process_model_outputs(self, output: SamplerOutput, scheduler_outputs: SchedulerOutputs) -> List[RequestOutput]:
# Update the scheduled sequence groups with the model outputs.
scheduled_seq_groups = scheduler_outputs.scheduled_seq_groups
for seq_group, outputs in zip(scheduled_seq_groups, output):
self._process_sequence_group_outputs(seq_group, outputs)
# Free the finished sequence groups.
self.scheduler.free_finished_seq_groups()
# Create the outputs.
request_outputs: List[RequestOutput] = []
for seq_group in scheduled_seq_groups:
request_output = RequestOutput.from_seq_group(seq_group)
request_outputs.append(request_output)
for seq_group in scheduler_outputs.ignored_seq_groups:
request_output = RequestOutput.from_seq_group(seq_group)
request_outputs.append(request_output)
# Update prefix state, now all the uncomputed prefixes are computed.
for seq_group in scheduled_seq_groups:
if (seq_group.prefix is not None and seq_group.prefix.allocated and not seq_group.prefix.computed):
seq_group.prefix.computed = True
# Log stats.
if self.log_stats:
self.stat_logger.log(self._get_stats(scheduler_outputs))
return request_outputs
def step(self) -> List[RequestOutput]:
"""Performs one decoding iteration and returns newly generated results.
This function performs one decoding iteration of the engine. It first
schedules the sequences to be executed in the next iteration and the
token blocks to be swapped in/out/copy. Then, it executes the model
and updates the scheduler with the model outputs. Finally, it decodes
the sequences and returns the newly generated results.
"""
seq_group_metadata_list, scheduler_outputs = self.scheduler.schedule()
if not scheduler_outputs.is_empty():
output = self.worker.execute_model(
seq_group_metadata_list=seq_group_metadata_list, # TODO: check this input
blocks_to_swap_in=scheduler_outputs.blocks_to_swap_in,
blocks_to_swap_out=scheduler_outputs.blocks_to_swap_out,
blocks_to_copy=scheduler_outputs.blocks_to_copy,)
else:
return [RequestOutput.from_seq_group(seq_group) for seq_group in scheduler_outputs.ignored_seq_groups]
return self._process_model_outputs(output, scheduler_outputs)
def do_log_stats(self) -> None:
"""Forced log when no requests active."""
if self.log_stats:
self.stat_logger.log(self._get_stats(scheduler_outputs=None))
def _get_stats(self, scheduler_outputs: Optional[SchedulerOutputs]) -> Stats:
"""Get Stats to be Logged to Prometheus."""
now = time.monotonic()
# KV Cache Usage in %.
num_total_gpu = self.cache_config.num_gpu_blocks
num_free_gpu = self.scheduler.block_manager.get_num_free_gpu_blocks()
gpu_cache_usage = 1.0 - (num_free_gpu / num_total_gpu)
num_total_cpu = self.cache_config.num_cpu_blocks
cpu_cache_usage = 0.
if num_total_cpu > 0:
num_free_cpu = self.scheduler.block_manager.get_num_free_cpu_blocks()
cpu_cache_usage = 1.0 - (num_free_cpu / num_total_cpu)
# Scheduler State
num_running = len(self.scheduler.running)
num_swapped = len(self.scheduler.swapped)
num_waiting = len(self.scheduler.waiting)
# Iteration stats if we have scheduler output.
num_prompt_tokens = 0
num_generation_tokens = 0
time_to_first_tokens = []
time_per_output_tokens = []
time_e2e_requests = []
if scheduler_outputs is not None:
prompt_run = scheduler_outputs.prompt_run
# Number of Tokens.
if prompt_run:
num_prompt_tokens = scheduler_outputs.num_batched_tokens
else:
num_generation_tokens = scheduler_outputs.num_batched_tokens
# Latency Timings.
time_last_iters = []
for seq_group in scheduler_outputs.scheduled_seq_groups:
# Time since last token. (n.b. updates seq_group.last_token_time)
time_last_iters.append(seq_group.get_last_latency(now))
# Time since arrival for all finished requests.
if seq_group.is_finished():
time_e2e_requests.append(now - seq_group.arrival_time)
time_to_first_tokens = time_last_iters if prompt_run else []
time_per_output_tokens = [] if prompt_run else time_last_iters
return Stats(
now=now,
num_running=num_running,
num_swapped=num_swapped,
num_waiting=num_waiting,
gpu_cache_usage=gpu_cache_usage,
cpu_cache_usage=cpu_cache_usage,
num_prompt_tokens=num_prompt_tokens,
num_generation_tokens=num_generation_tokens,
time_to_first_tokens=time_to_first_tokens,
time_per_output_tokens=time_per_output_tokens,
time_e2e_requests=time_e2e_requests,
)
# TODO: we may not need to decode
def _decode_sequence(self, seq: Sequence, prms: SamplingParams) -> None:
"""Decodes the new token for a sequence."""
(new_tokens, new_output_text, prefix_offset, read_offset) = detokenize_incrementally(
self.get_tokenizer_for_seq(seq),
all_input_ids=seq.get_token_ids(),
prev_tokens=seq.tokens,
prefix_offset=seq.prefix_offset,
read_offset=seq.read_offset,
skip_special_tokens=prms.skip_special_tokens,
spaces_between_special_tokens=prms.spaces_between_special_tokens,
)
if seq.tokens is None:
seq.tokens = new_tokens
else:
seq.tokens.extend(new_tokens)
seq.prefix_offset = prefix_offset
seq.read_offset = read_offset
seq.output_text += new_output_text
def _check_stop(self, seq: Sequence, sampling_params: SamplingParams) -> None:
"""Stop the finished sequences."""
# for stop_str in sampling_params.stop:
# if seq.output_text.endswith(stop_str):
# self._finalize_sequence(seq, sampling_params, stop_str)
# seq.status = SequenceStatus.FINISHED_STOPPED
# return
# if seq.get_last_token_id() in sampling_params.stop_token_ids:
# stop_str = self.get_tokenizer_for_seq(seq).convert_ids_to_tokens(seq.get_last_token_id())
# self._finalize_sequence(seq, sampling_params, stop_str)
# seq.status = SequenceStatus.FINISHED_STOPPED
# return
# Check if the sequence has reached max_model_len.
if seq.get_len() > self.scheduler_config.max_model_len:
seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
return
# Check if the sequence has reached max_tokens.
if seq.get_output_len() == sampling_params.max_tokens:
seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
return
# Check if the sequence has generated the EOS token.
if ((not sampling_params.ignore_eos) and
seq.get_last_token_id() == self.get_tokenizer_for_seq(seq).eos_token_id):
seq.status = SequenceStatus.FINISHED_STOPPED
return
def _finalize_sequence(self, seq: Sequence, sampling_params: SamplingParams, stop_string: str) -> None:
if not sampling_params.include_stop_str_in_output and stop_string:
# Truncate the output text so that the stop string is
# not included in the output.
seq.output_text = seq.output_text[:-len(stop_string)]
def add_lora(self, lora_request: LoRARequest) -> bool:
assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
return self.worker.add_lora(lora_request)
def remove_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.remove_lora(lora_id)
def list_loras(self) -> List[int]:
return self.worker.list_loras()
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor]) -> None:
self.worker.sync_model_weights(actor_weights=actor_weights)
def offload_model_weights(self) -> None:
self.worker.offload_model_weights()
def initialize_cluster(
parallel_config: ParallelConfig,
engine_use_ray: bool = False,
ray_address: Optional[str] = None,
) -> Tuple[str, Optional[None]]:
"""Initialize the distributed cluster probably with Ray.
Args:
parallel_config: The configurations for parallel execution.
engine_use_ray: Whether to use Ray for async engine.
ray_address: The address of the Ray cluster. If None, uses
the default Ray cluster address.
Returns:
A tuple of (`distributed_init_method`, `placement_group`). The
`distributed_init_method` is the address for initializing the
distributed backend. `placement_group` includes the specification
of the resources for each distributed worker.
"""
# Initialize cluster locally.
port = get_open_port()
# We need to setup the distributed init method to make sure
# the distributed megatron code (e.g., get world size) works correctly.
distributed_init_method = f"tcp://localhost:{port}"
return distributed_init_method, None
def get_open_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return s.getsockname()[1]
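# NOTE (illustration): what initialize_cluster returns in this SPMD setup (no
# Ray placement group); the port is whatever get_open_port() finds free:
#     distributed_init_method, placement_group = initialize_cluster(parallel_config)
#     # e.g. ("tcp://localhost:51327", None), later used when the Worker calls
#     # torch.distributed.init_process_group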
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/model_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
"""Utilities for selecting and loading models."""
import contextlib
from typing import Dict, Type, Union
import torch
import torch.nn as nn
from transformers import PretrainedConfig, PreTrainedModel
from megatron.core.tensor_parallel.utils import VocabUtility
from vllm.model_executor.models import ModelRegistry
from vllm.model_executor.weight_utils import (get_quant_config, initialize_dummy_weights)
from .config import ModelConfig
from vllm.config import DeviceConfig, LoRAConfig
from .weight_loaders import *
from vllm.model_executor.sampling_metadata import SamplingMetadata, SamplingTensors
from vllm.sequence import SamplerOutput
from typing import Optional
from vllm.model_executor.layers.sampler import Sampler
from vllm.model_executor.layers.sampler import _prune_hidden_states, _apply_logits_processors, _apply_penalties, _apply_top_k_top_p, _apply_min_p, _sample, _get_logprobs, _build_sampler_output
@contextlib.contextmanager
def _set_default_torch_dtype(dtype: torch.dtype):
"""Sets the default torch dtype to the given dtype."""
old_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
yield
torch.set_default_dtype(old_dtype)
def _get_model_architecture(config: PretrainedConfig) -> Type[nn.Module]:
architectures = getattr(config, "architectures", [])
for arch in architectures:
model_cls = ModelRegistry.load_model_cls(arch)
if model_cls is not None:
return model_cls
raise ValueError(f"Model architectures {architectures} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
from vllm.model_executor.layers.linear import *
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding, ParallelLMHead
from vllm.model_executor.layers.activation import ScaledActivation
__LAYER_WEIGHT_LOADER_REGISTRY__ = {
ColumnParallelLinear: parallel_weight_loader,
MergedColumnParallelLinear: parallel_weight_loader,
QKVParallelLinear: parallel_weight_loader,
RowParallelLinear: parallel_weight_loader,
VocabParallelEmbedding: parallel_weight_loader,
ParallelLMHead: parallel_weight_loader
# "ScaledActivation.weight_loader": ScaledActivation, # TODO(shengguangming): latest commit in vllm fix awq for this function and add load_weights
# "default_weight_loader": default_weight_loader
}
# NOTE(gmsheng): change the weight_loader function in runtime
for layer_class, weight_loader in __LAYER_WEIGHT_LOADER_REGISTRY__.items():
layer_class.weight_loader = weight_loader
__MODEL_WEIGHT_LOADER_REGISTRY__ = {
'GPT2LMHeadModel': gpt2_weight_loader,
'LlamaForCausalLM': llama_weight_loader,
'LLaMAForCausalLM': llama_weight_loader,
'MistralForCausalLM': mistral_weight_loader,
}
# FIXME(shengguangming): the vLLM vocab will be padded to a multiple of 64, which may cause
# out-of-bounds access, so we need to rewrite the init function of the vocab embedding
DEFAULT_VOCAB_PADDING_SIZE = 64
def vocab_init(self,
num_embeddings: int,
embedding_dim: int,
params_dtype: Optional[torch.dtype] = None,
org_num_embeddings: Optional[int] = None,
padding_size: int = DEFAULT_VOCAB_PADDING_SIZE):
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
# TODO (pad to be divisible by 4)
self.num_embeddings = num_embeddings
self.org_vocab_size = org_num_embeddings or num_embeddings
# self.num_embeddings_padded = pad_vocab_size(num_embeddings,
# padding_size)
self.embedding_dim = embedding_dim
if params_dtype is None:
params_dtype = torch.get_default_dtype()
self.tp_size = get_tensor_model_parallel_world_size()
# Divide the weight matrix along the vocabulary dimension.
self.vocab_start_index, self.vocab_end_index = (VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_tensor_model_parallel_rank(), self.tp_size))
self.num_embeddings_per_partition = (self.vocab_end_index - self.vocab_start_index)
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
# device=torch.cuda.current_device(),
dtype=params_dtype))
set_weight_attrs(self.weight, {"parallel_dim": 0, "weight_loader": self.weight_loader})
VocabParallelEmbedding.__init__ = vocab_init
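# NOTE(editor): worked example, not part of the original file. With the patched
# __init__ and tp_size = 2, a 32000-token vocab is sharded exactly as
# [0, 16000) and [16000, 32000) with no padding rows; stock vLLM would first pad
# the vocab to a multiple of 64 (e.g. GPT-2's 50257 -> 50304), so sampled ids
# could hit padding rows absent from the actor's embedding. The patched version
# instead requires num_embeddings to be divisible by tp_size (Megatron's
# VocabUtility asserts divisibility).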
def _get_model_weight_loader(arch: str):
if arch in __MODEL_WEIGHT_LOADER_REGISTRY__:
return __MODEL_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
def get_model(actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig] = None) -> nn.Module:
model_class = _get_model_architecture(model_config.hf_config)
# Get the quantization config.
linear_method = None
quant_config = None
if model_config.quantization is not None:
quant_config = get_quant_config(model_config.quantization, model_config.model, model_config.hf_config,
model_config.download_dir)
capability = torch.cuda.get_device_capability()
capability = capability[0] * 10 + capability[1]
if capability < quant_config.get_min_capability():
raise ValueError(f"The quantization method {model_config.quantization} is not "
"supported for the current GPU. "
f"Minimum capability: {quant_config.get_min_capability()}. "
f"Current capability: {capability}.")
supported_dtypes = quant_config.get_supported_act_dtypes()
if model_config.dtype not in supported_dtypes:
raise ValueError(f"{model_config.dtype} is not supported for quantization "
f"method {model_config.quantization}. Supported dtypes: "
f"{supported_dtypes}")
linear_method = quant_config.get_linear_method()
with _set_default_torch_dtype(model_config.dtype):
# Create a model instance.
# The weights will be initialized as empty tensors.
# with torch.device(device_config.device):
# NOTE(sgm): init the model in cpu
model = model_class(model_config.hf_config, linear_method)
if model_config.load_format == "dummy":
model = model.cuda()
# NOTE(woosuk): For accurate performance evaluation, we assign
# random values to the weights.
initialize_dummy_weights(model)
elif model_config.load_format == 'model' or model_config.load_format == 'auto':
# NOTE(shengguangming) Load the weights from the actor model
if isinstance(actor_model, nn.Module):
load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
else:
load_weights(actor_weights=actor_model, vllm_model=model)
# NOTE(sgm): some weights already point to GPU memory, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
# here the actor model is a .state_dict()
def load_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
# NOTE(sgm): to reduce peak memory usage, we offload the vLLM model to CPU after init,
# so we need this after syncing the model weights in the first iteration.
vllm_model = vllm_model.cuda()
# FIXME(sgm): hack the Sampler functions in vLLM v0.3.1.
# Since vLLM uses Ray, the sampler result only needs to be returned to the driver node,
# so gather is enough. However, we use SPMD instead of a central scheduler,
# so all_gather is required (aligned with v0.2.6).
def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor,
embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
# Get the logits for the next tokens.
logits = torch.matmul(hidden_states, embedding.t())
if embedding_bias is not None:
logits += embedding_bias
logits = tensor_model_parallel_all_gather(logits)
# Remove paddings in vocab (if any).
if logits is not None:
logits = logits[:, :self.org_vocab_size]
return logits
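# NOTE(editor): shape sketch, not part of the original file. With tp_size = 2 and
# vocab size V, each rank computes a [num_tokens, V / 2] logits shard from its
# embedding partition; tensor_model_parallel_all_gather then concatenates the
# shards along the last dim so every rank holds the full [num_tokens, V] logits.
# Stock vLLM v0.3.1 can use gather() because only the Ray driver samples; under
# SPMD every rank samples, so every rank needs the full logits.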
def forward(
self,
embedding: torch.Tensor,
hidden_states: torch.Tensor,
sampling_metadata: SamplingMetadata,
embedding_bias: Optional[torch.Tensor] = None,
) -> Optional[SamplerOutput]:
# Get the hidden states that we use for sampling.
hidden_states = _prune_hidden_states(hidden_states, sampling_metadata)
# Get the logits for the next tokens.
logits = self._get_logits(hidden_states, embedding, embedding_bias)
# save the original logprobs for the sampler output
origin_logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float)
# Only perform sampling in the driver worker.
# Note: `_get_logits` is still distributed across TP workers because
# the `embedding` weight is distributed across TP workers.
# TODO(zhuohan): Change the get_logits part to a separate stage.
if not sampling_metadata.perform_sampling:
return None
assert logits is not None
_, vocab_size = logits.shape
# Apply logits processors (if any).
logits = _apply_logits_processors(logits, sampling_metadata)
# Prepare sampling tensors with pinned memory to avoid blocking.
(sampling_tensors, do_penalties, do_top_p_top_k,
do_min_p) = SamplingTensors.from_sampling_metadata(sampling_metadata, vocab_size, logits.device, logits.dtype)
# Apply presence and frequency penalties.
if do_penalties:
logits = _apply_penalties(logits, sampling_tensors.prompt_tokens, sampling_tensors.output_tokens,
sampling_tensors.presence_penalties, sampling_tensors.frequency_penalties,
sampling_tensors.repetition_penalties)
# Apply temperature scaling.
# Use in-place division to avoid creating a new tensor.
logits.div_(sampling_tensors.temperatures.unsqueeze_(dim=1))
if do_top_p_top_k:
logits = _apply_top_k_top_p(logits, sampling_tensors.top_ps, sampling_tensors.top_ks)
if do_min_p:
logits = _apply_min_p(logits, sampling_tensors.min_ps)
# We use float32 for probabilities and log probabilities.
# Compute the probabilities.
probs = torch.softmax(logits, dim=-1, dtype=torch.float)
# Compute the log probabilities.
# Use log_softmax to ensure numerical stability.
logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float)
# Sample the next tokens.
sample_results = _sample(probs, logprobs, sampling_metadata)
# Get the logprobs query results.
# prompt_logprobs, sample_logprobs = _get_logprobs(
# logprobs, sampling_metadata, sample_results)
prompt_logprobs, sample_logprobs = _get_logprobs(origin_logprobs, sampling_metadata, sample_results)
return _build_sampler_output(sample_results, sampling_metadata, prompt_logprobs, sample_logprobs)
Sampler._get_logits = _get_logits
Sampler.forward = forward
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/model_runner.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/model_runner.py
from typing import Dict, List, Optional, Tuple, Set, Union
import contextlib
import time
import numpy as np
import torch
import torch.nn as nn
from vllm.config import (DeviceConfig, ModelConfig, LoRAConfig, ParallelConfig, SchedulerConfig)
from vllm.logger import init_logger
from vllm.model_executor import InputMetadata, SamplingMetadata
from vllm.sampling_params import SamplingParams, SamplingType
from vllm.sequence import SamplerOutput, SequenceData, SequenceGroupMetadata
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
from vllm.lora.layers import LoRAMapping
from vllm.lora.request import LoRARequest
from vllm.utils import in_wsl
from vllm.worker.model_runner import ModelRunner, CUDAGraphRunner, _async_h2d
from .model_loader import get_model
logger = init_logger(__name__)
KVCache = Tuple[torch.Tensor, torch.Tensor]
_PAD_SLOT_ID = -1
LORA_WARMUP_RANK = 8
# Capture graphs for batch size 1, 2, 4, 8, 16, 24, 32, 40, ..., 256.
# NOTE: _get_graph_batch_size needs to be updated if this list is changed.
_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [8 * i for i in range(1, 33)]
class ModelRunner(ModelRunner):
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
kv_cache_dtype: Optional[str] = "auto",
):
self.model_config = model_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.lora_config = lora_config
# model_config can be None in tests/samplers/test_sampler.py.
# FIXME(woosuk): This is a hack to make the tests work. Refactor this.
self.sliding_window = (model_config.get_sliding_window() if model_config is not None else None)
self.device_config = (device_config if device_config is not None else DeviceConfig())
self.device = self.device_config.device
self.model = model # this will be replaced by get_model()
self.block_size = None # Set after initial profiling.
self.lora_manager = None
self.graph_runners: Dict[int, CUDAGraphRunner] = {}
self.graph_memory_pool = None # Set during graph capture.
self.max_context_len_to_capture = (self.model_config.max_context_len_to_capture
if self.model_config is not None else 0)
# When using CUDA graph, the input block tables must be padded to
# max_context_len_to_capture. However, creating the block table in
# Python can be expensive. To optimize this, we cache the block table
# in numpy and only copy the actual input content at every iteration.
# The shape of the cached block table will be
# (max batch size to capture, max context len to capture / block size).
self.graph_block_tables = None # Set after initial profiling.
# cache in_wsl result
self.in_wsl = in_wsl()
self.kv_cache_dtype = kv_cache_dtype
def load_model(self) -> None:
self.model = get_model(actor_model=self.model,
model_config=self.model_config,
device_config=self.device_config,
lora_config=self.lora_config)
vocab_size = self.model.config.vocab_size
if self.lora_config:
assert hasattr(
self.model,
"supported_lora_modules") and self.model.supported_lora_modules, "Model does not support LoRA"
assert hasattr(self.model, "embedding_modules"), "Model does not have embedding_modules"
assert hasattr(self.model, "embedding_padding_modules"), "Model does not have embedding_padding_modules"
self.lora_manager = LRUCacheWorkerLoRAManager(
self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens + self.scheduler_config.max_paddings, vocab_size,
self.lora_config, self.device, self.model.embedding_modules, self.model.embedding_padding_modules)
self.model = self.lora_manager.create_lora_manager(self.model)
def _prepare_sample(
self,
seq_group_metadata_list: List[SequenceGroupMetadata],
prompt_lens: List[int],
subquery_lens: Optional[List[int]],
) -> SamplingMetadata:
seq_groups: List[Tuple[List[int], SamplingParams]] = []
selected_token_indices: List[int] = []
selected_token_start_idx = 0
categorized_sample_indices = {t: [] for t in SamplingType}
categorized_sample_indices_start_idx = 0
max_subquery_len = max(subquery_lens) if subquery_lens else 1
for i, seq_group_metadata in enumerate(seq_group_metadata_list):
seq_ids = list(seq_group_metadata.seq_data.keys())
sampling_params = seq_group_metadata.sampling_params
seq_groups.append((seq_ids, sampling_params))
if seq_group_metadata.is_prompt:
assert len(seq_ids) == 1
assert subquery_lens is not None
subquery_len = subquery_lens[i]
if sampling_params.prompt_logprobs is not None:
# NOTE: prompt token positions do not need sampling; skip them
categorized_sample_indices_start_idx += subquery_len - 1
categorized_sample_indices[sampling_params.sampling_type].append(categorized_sample_indices_start_idx)
categorized_sample_indices_start_idx += 1
if sampling_params.prompt_logprobs is not None:
selected_token_indices.extend(
range(selected_token_start_idx, selected_token_start_idx + subquery_len - 1))
selected_token_indices.append(selected_token_start_idx + subquery_len - 1)
selected_token_start_idx += max_subquery_len
else:
num_seqs = len(seq_ids)
selected_token_indices.extend(range(selected_token_start_idx, selected_token_start_idx + num_seqs))
selected_token_start_idx += num_seqs
categorized_sample_indices[sampling_params.sampling_type].extend(
range(categorized_sample_indices_start_idx, categorized_sample_indices_start_idx + num_seqs))
categorized_sample_indices_start_idx += num_seqs
selected_token_indices = _async_h2d(selected_token_indices,
dtype=torch.long,
target_device=self.device,
pin_memory=not self.in_wsl)
categorized_sample_indices = {
t: _async_h2d(seq_ids, dtype=torch.int, target_device=self.device, pin_memory=not self.in_wsl)
for t, seq_ids in categorized_sample_indices.items()
}
seq_data: Dict[int, SequenceData] = {}
for seq_group_metadata in seq_group_metadata_list:
seq_data.update(seq_group_metadata.seq_data)
sampling_metadata = SamplingMetadata(
seq_groups=seq_groups,
seq_data=seq_data,
prompt_lens=prompt_lens,
selected_token_indices=selected_token_indices,
categorized_sample_indices=categorized_sample_indices,
)
return sampling_metadata
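# NOTE(editor): worked example, not part of the original file. Two prompt groups
# with subquery_lens = [4, 4] (so max_subquery_len = 4):
#   - group 0 with sampling_params.prompt_logprobs set:
#       selected_token_indices gains [0, 1, 2, 3]  (every prompt position)
#   - group 1 without prompt_logprobs:
#       selected_token_indices gains [7]           (only 4 + 4 - 1, the last position)
# Each prompt group advances selected_token_start_idx by max_subquery_len.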
def prepare_input_tensors(
self,
seq_group_metadata_list: Optional[List[SequenceGroupMetadata]],
) -> Tuple[torch.Tensor, torch.Tensor, InputMetadata, SamplingMetadata, Set[int], LoRAMapping]:
# NOTE: We assume that all sequences in the group are all prompts or
# all decodes.
is_prompt = seq_group_metadata_list[0].is_prompt
# Prepare input tensors.
if is_prompt:
(input_tokens, input_positions, input_metadata, prompt_lens, subquery_lens, lora_index_mapping,
lora_prompt_mapping, lora_requests) = self._prepare_prompt(seq_group_metadata_list)
else:
(input_tokens, input_positions, input_metadata, lora_index_mapping, lora_prompt_mapping,
lora_requests) = self._prepare_decode(seq_group_metadata_list)
prompt_lens = []
subquery_lens = None
sampling_metadata = self._prepare_sample(seq_group_metadata_list, prompt_lens, subquery_lens)
if self.lora_config:
flat_lora_index_mapping = [item for sublist in lora_index_mapping for item in sublist]
lora_mapping = LoRAMapping(
flat_lora_index_mapping,
lora_prompt_mapping,
)
else:
lora_mapping = None
return (input_tokens, input_positions, input_metadata, sampling_metadata, lora_requests, lora_mapping)
@torch.inference_mode()
def execute_model(
self,
seq_group_metadata_list: Optional[List[SequenceGroupMetadata]],
kv_caches: List[Tuple[torch.Tensor, torch.Tensor]],
) -> Optional[SamplerOutput]:
(input_tokens, input_positions, input_metadata, sampling_metadata, lora_requests,
lora_mapping) = self.prepare_input_tensors(seq_group_metadata_list)
if self.lora_config:
self.set_active_loras(lora_requests, lora_mapping)
# Execute the model.
if input_metadata.use_cuda_graph:
graph_batch_size = input_tokens.shape[0]
model_executable = self.graph_runners[graph_batch_size]
else:
model_executable = self.model
hidden_states = model_executable(
input_ids=input_tokens,
positions=input_positions,
kv_caches=kv_caches,
input_metadata=input_metadata,
)
# Sample the next token.
output = self.model.sample(
hidden_states=hidden_states,
sampling_metadata=sampling_metadata,
)
return output
@torch.inference_mode()
def profile_run(self) -> None:
# Enable top-k sampling to reflect the accurate memory usage.
vocab_size = self.model_config.get_vocab_size()
# FIXME(sgm): these sampling params will call cumsum(), causing the
# deterministic cumsum to throw an error
sampling_params = SamplingParams(top_p=0.99, top_k=vocab_size - 1)
max_num_batched_tokens = self.scheduler_config.max_num_batched_tokens
max_num_seqs = self.scheduler_config.max_num_seqs
# This represents the maximum number of different requests
# that will have unique LoRAs, and therefore the max amount of memory
# consumption. Create dummy LoRA request copies from the LoRA request
# passed in, which contains a LoRA from the LoRA warmup path.
dummy_lora_requests = []
dummy_lora_requests_per_seq = []
if self.lora_config:
for idx in range(self.lora_config.max_loras):
lora_id = idx + 1
dummy_lora_request = LoRARequest(
lora_name=f"warmup_{lora_id}",
lora_int_id=lora_id,
lora_local_path="/not/a/real/path",
)
self.lora_manager.add_dummy_lora(dummy_lora_request, rank=LORA_WARMUP_RANK)
dummy_lora_requests.append(dummy_lora_request)
dummy_lora_requests_per_seq = [
dummy_lora_requests[idx % len(dummy_lora_requests)] for idx in range(max_num_seqs)
]
# Profile memory usage with max_num_sequences sequences and the total
# number of tokens equal to max_num_batched_tokens.
seqs: List[SequenceGroupMetadata] = []
for group_id in range(max_num_seqs):
seq_len = (max_num_batched_tokens // max_num_seqs + (group_id < max_num_batched_tokens % max_num_seqs))
seq_data = SequenceData([0] * seq_len)
seq = SequenceGroupMetadata(
request_id=str(group_id),
is_prompt=True,
seq_data={group_id: seq_data},
sampling_params=sampling_params,
block_tables=None,
lora_request=dummy_lora_requests_per_seq[group_id] if dummy_lora_requests_per_seq else None,
)
seqs.append(seq)
# Run the model with the dummy inputs.
num_layers = self.model_config.get_num_layers(self.parallel_config)
kv_caches = [(None, None)] * num_layers
self.execute_model(seqs, kv_caches)
torch.cuda.synchronize()
return
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/parallel_state.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Model and data parallel groups."""
import torch
import torch.distributed
import vllm.model_executor.parallel_utils.parallel_state as ps
"""
This version is strongly tied to Megatron to implement HybridEngine and weight sharing between vLLM and Megatron.
- We assume the Megatron tp+dp+pp world is already established before calling this function.
"""
# Tensor model parallel group that the current rank belongs to.
_TENSOR_MODEL_PARALLEL_GROUP = None
# Micro data parallel group: an additional dp group that originates from splitting the training tp
# into infer_tp and micro_tp. By default, we use the order micro_dp - tp.
_MICRO_DATA_PARALLEL_GROUP = None
def initialize_model_parallel_from_megatron(
tensor_model_parallel_size=None  # None keeps infer_tp = train_tp, for backward compatibility
) -> None:
from megatron.core import parallel_state as mpu
from megatron.distributed import new_group
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
if tensor_model_parallel_size is None:
tensor_model_parallel_size = mpu.get_tensor_model_parallel_world_size()
else:
assert isinstance(tensor_model_parallel_size, int)
# Build the tensor model-parallel groups.
assert ps._TENSOR_MODEL_PARALLEL_GROUP is None, ("tensor model parallel group is already initialized")
assert tensor_model_parallel_size <= mpu.get_tensor_model_parallel_world_size(
), 'Not implemented for infer_tp > train_tp'
global _TENSOR_MODEL_PARALLEL_GROUP
global _MICRO_DATA_PARALLEL_GROUP
assert mpu.get_tensor_model_parallel_world_size() % tensor_model_parallel_size == 0
micro_dp_size = mpu.get_tensor_model_parallel_world_size() // tensor_model_parallel_size
world_size: int = torch.distributed.get_world_size()
num_micro_dp_groups = world_size // micro_dp_size
rank = torch.distributed.get_rank()
# Build the micro dp groups.
assert _MICRO_DATA_PARALLEL_GROUP is None, ("micro data parallel group is already initialized")
for i in range(num_micro_dp_groups):
ranks = range(i * micro_dp_size, (i + 1) * micro_dp_size)
group = new_group(rank=rank, ranks=ranks, group_type='micro_dp')
if rank in ranks:
_MICRO_DATA_PARALLEL_GROUP = group
if tensor_model_parallel_size == mpu.get_tensor_model_parallel_world_size():
# using the same tp group as Megatron
ps._TENSOR_MODEL_PARALLEL_GROUP = mpu.get_tensor_model_parallel_group()
_TENSOR_MODEL_PARALLEL_GROUP = mpu.get_tensor_model_parallel_group()
# no _MICRO_DATA_PARALLEL_GROUP
else:
# initialize a micro_dp group and a tp group
# assume training tp=4, infer tp=2, then, weight is partitioned as
# [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference
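# NOTE(editor): worked example, not part of the original file. With world_size = 8,
# train_tp = 4 and infer_tp = 2, num_tensor_model_parallel_groups_per_train_tp = 2
# and the loops below build infer-tp groups [0, 2], [1, 3], [4, 6], [5, 7];
# the micro-dp groups built above are [0, 1], [2, 3], [4, 5], [6, 7].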
# Build the inference tp groups
train_tp = mpu.get_tensor_model_parallel_world_size()
num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
assert _TENSOR_MODEL_PARALLEL_GROUP is None, ("tensor model parallel group is already initialized")
for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp):
start = train_tp * i
end = train_tp * (i + 1)
for j in range(num_tensor_model_parallel_groups_per_train_tp):
ranks = list(range(start, end, num_tensor_model_parallel_groups_per_train_tp))
for k in range(len(ranks)):  # NOTE: renamed from `i` to avoid shadowing the outer loop variable
ranks[k] += j
# group = torch.distributed.new_group(ranks)
group = new_group(rank=rank, ranks=ranks, group_type='infer_tp')
if rank in ranks:
_TENSOR_MODEL_PARALLEL_GROUP = group
ps._TENSOR_MODEL_PARALLEL_GROUP = _TENSOR_MODEL_PARALLEL_GROUP
# Build the pipeline model-parallel groups.
# global _PIPELINE_MODEL_PARALLEL_GROUP
# global _PIPELINE_GLOBAL_RANKS
# assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized")
# ps._PIPELINE_MODEL_PARALLEL_GROUP = mpu.get_pipeline_model_parallel_group()
# ps._PIPELINE_GLOBAL_RANKS = mpu.get_pipeline_model_parallel_ranks()
"""
Tensor model parallel utilities
"""
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert _TENSOR_MODEL_PARALLEL_GROUP is not None, ("tensor model parallel group is not initialized")
return _TENSOR_MODEL_PARALLEL_GROUP
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
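# NOTE(editor): worked example, not part of the original file. With a tp world
# size of 4, global ranks 4..7 form one tp group, so for global rank 5 this
# returns (5 // 4) * 4 == 4, i.e. the first rank of its group.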
"""
Micro Data parallel group
"""
def get_micro_data_parallel_group():
assert _MICRO_DATA_PARALLEL_GROUP is not None
return _MICRO_DATA_PARALLEL_GROUP
def get_micro_data_parallel_world_size():
return torch.distributed.get_world_size(group=get_micro_data_parallel_group())
def get_micro_data_parallel_rank():
return torch.distributed.get_rank(group=get_micro_data_parallel_group())
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/transformers_utils/tokenizer_group/tokenizer_group.py
from typing import List, Optional, Tuple, Union
from transformers import (AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast)
from vllm.lora.request import LoRARequest
from vllm.utils import make_async, LRUCache
from vllm.transformers_utils.tokenizers import *
class TokenizerGroup:
"""A group of tokenizers that can be used for LoRA adapters."""
def __init__(self, tokenizer: PreTrainedTokenizer, enable_lora: bool, max_num_seqs: int,
max_input_length: Optional[int]):
self.enable_lora = enable_lora
self.max_input_length = max_input_length
self.tokenizer = tokenizer
if enable_lora:
self.lora_tokenizers = LRUCache(capacity=max_num_seqs)
else:
self.lora_tokenizers = None
def encode(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = self.get_lora_tokenizer(lora_request)
return tokenizer.encode(prompt)
async def encode_async(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = await self.get_lora_tokenizer_async(lora_request)
return tokenizer.encode(prompt)
def get_lora_tokenizer(self, lora_request: Optional[LoRARequest]) -> "PreTrainedTokenizer":
if not lora_request or not self.enable_lora:
return self.tokenizer
if lora_request.lora_int_id not in self.lora_tokenizers:
# TODO(sgm): the lora tokenizer is also passed, but may be different
tokenizer = self.tokenizer
# tokenizer = (get_lora_tokenizer(
# lora_request, **self.tokenizer_config) or self.tokenizer)
self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer)
return tokenizer
else:
return self.lora_tokenizers.get(lora_request.lora_int_id)
# FIXME(sgm): for simplicity, we assign the special token here
@property
def pad_token_id(self):
return self.tokenizer.pad_token_id
@property
def eos_token_id(self):
return self.tokenizer.eos_token_id
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict
import torch
import torch.nn as nn
# NOTE(shengguangming): replace the original weight loader functions on the vLLM classes
def parallel_weight_loader(self, param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Parallel Linear weight loader."""
assert param.size() == loaded_weight.size(
), 'the parameter size is not aligned with the loaded weight size, param size: {}, loaded_weight size: {}'.format(
param.size(), loaded_weight.size())
assert param.data.dtype == loaded_weight.data.dtype, "to share weights, the data types must match"
param.data = loaded_weight.data
def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Default weight loader."""
assert param.size() == loaded_weight.size()
assert param.data.dtype == loaded_weight.data.dtype, "to share weights, the data types must match"
param.data = loaded_weight.data
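# NOTE(editor): illustrative sketch, not part of the original file. Both loaders
# rebind param.data instead of copying, so the vLLM parameter aliases the actor's
# storage and later actor updates are visible without another load:
#   vllm_param.data = actor_tensor.data      # zero-copy aliasing (what the code does)
#   vllm_param.data.copy_(actor_tensor)      # by contrast, this would duplicate memory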
def gpt2_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
# GPT-2 ties the weights of the embedding layer and the final
# linear layer.
continue
if ".attn.bias" in name or ".attn.masked_bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
if not name.startswith("transformer."):
name = "transformer." + name
param = params_dict[name]
# The HF's GPT-2 implementation uses Conv1D instead of Linear.
# Because of this, we need to transpose the weights.
# Note(zhuohan): the logic below might break quantized models.
for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
if conv1d_weight_name not in name:
continue
if not name.endswith(".weight"):
continue
# TODO: check megatron
loaded_weight = loaded_weight.t()
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# NOTE(shengguangming): the megatron llama may have this prefix
prefix = '0.module.module.'
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if name[:len(prefix)] == prefix:
name = name[len(prefix):]
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def mistral_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# TODO: need to implement a general way to deal with prefix
prefix = '0.module.module.'
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if name[:len(prefix)] == prefix:
name = name[len(prefix):]
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
================================================
FILE: verl/third_party/vllm/vllm_v_0_3_1/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/worker.py
"""A GPU worker class."""
import os
import gc
from typing import Dict, List, Tuple, Optional, Union, Set
import torch
import torch.distributed
import torch.nn as nn
from vllm.config import (CacheConfig, DeviceConfig, ModelConfig, ParallelConfig, SchedulerConfig, LoRAConfig)
from vllm.model_executor import InputMetadata, set_random_seed
from vllm.model_executor.parallel_utils.parallel_state import (initialize_model_parallel)
from vllm.sampling_params import SamplingParams, SamplingType
from vllm.sequence import SamplerOutput, SequenceData, SequenceGroupMetadata
from vllm.worker.cache_engine import CacheEngine
from vllm.model_executor.parallel_utils.custom_all_reduce import init_custom_ar
from vllm.model_executor.parallel_utils.parallel_state import get_tensor_model_parallel_group
from .model_runner import ModelRunner
from .model_loader import load_weights
from .parallel_state import initialize_model_parallel_from_megatron
from vllm.lora.request import LoRARequest
class Worker:
"""A worker class that executes (a partition of) the model on a GPU.
Each worker is associated with a single GPU. The worker is responsible for
maintaining the KV cache and executing the model on the GPU. In case of
distributed inference, each worker is assigned a partition of the model.
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
rank: Optional[int] = None,
distributed_init_method: Optional[str] = None,
lora_config: Optional[LoRAConfig] = None,
kv_cache_dtype: Optional[str] = "auto",
) -> None:
# self.model = model # will be replaced in the init_model
self.model_config = model_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.rank = rank
self.distributed_init_method = distributed_init_method
self.lora_config = lora_config
self.model_runner = ModelRunner(
model,
model_config,
parallel_config,
scheduler_config,
device_config,
lora_config=self.lora_config,
kv_cache_dtype=kv_cache_dtype,
)
# Uninitialized cache engine. Will be initialized by
# self.init_cache_engine().
self.cache_config = None
self.block_size = None
self.sliding_window = None
self.cache_engine = None
self.cache_events = None
self.gpu_cache = None
# For offloading inference engine params
self.cpu_model = None
def init_model(self, cupy_port: Optional[int] = None):
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# Env vars will be set by TORCHRUN.
self.rank = self.rank if self.rank is not None else int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
self.device = torch.device(f"cuda:{local_rank}")
if self.rank < 0:
raise ValueError("Invalid or unspecified rank.")
torch.cuda.set_device(self.device)
_check_if_gpu_supports_dtype(self.model_config.dtype)
# Initialize the distributed environment.
# TODO: do not use cupy
_init_distributed_environment(self.parallel_config, self.rank, self.distributed_init_method)
if not self.parallel_config.disable_custom_all_reduce:
init_custom_ar()
# Initialize the model.
set_random_seed(self.model_config.seed)
# self.model = get_model(actor_model=self.model, model_config=self.model_config)
def load_model(self):
self.model_runner.load_model()
@torch.inference_mode()
def profile_num_available_blocks(
self,
block_size: int,
gpu_memory_utilization: float,
cpu_swap_space: int,
cache_dtype: str,
) -> Tuple[int, int]:
# Profile the memory usage of the model and get the maximum number of
# cache blocks that can be allocated with the remaining free memory.
torch.cuda.empty_cache()
# torch.cuda.reset_peak_memory_stats()
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
self.model_runner.profile_run()
# Calculate the number of blocks that can be allocated with the
# profiled peak memory.
torch.cuda.synchronize()
free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
peak_memory = total_gpu_memory - free_gpu_memory
cache_block_size = CacheEngine.get_cache_block_size(block_size, cache_dtype, self.model_config,
self.parallel_config)
# NOTE(sgm) use the remaining memory
num_gpu_blocks = int((free_gpu_memory * gpu_memory_utilization) // cache_block_size)
# num_gpu_blocks = int((total_gpu_memory * gpu_memory_utilization - peak_memory) // cache_block_size)
num_cpu_blocks = int(cpu_swap_space // cache_block_size)
num_gpu_blocks = max(num_gpu_blocks, 0)
num_cpu_blocks = max(num_cpu_blocks, 0)
if self.model_runner.lora_manager:
self.model_runner.remove_all_loras()
gc.collect()
torch.cuda.empty_cache()
# Synchronize the number of blocks across all ranks
num_gpu_blocks = torch.tensor([num_gpu_blocks], device='cuda')
num_cpu_blocks = torch.tensor([num_cpu_blocks], device='cuda')
torch.distributed.all_reduce(num_gpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group())
torch.distributed.all_reduce(num_cpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group())
num_gpu_blocks = num_gpu_blocks.item()
num_cpu_blocks = num_cpu_blocks.item()
return num_gpu_blocks, num_cpu_blocks
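# NOTE(editor): worked example with illustrative numbers, not part of the original
# file. If 40 GiB are free after profiling, gpu_memory_utilization = 0.9 and
# get_cache_block_size() returns 2 MiB, then (working in MiB)
#   num_gpu_blocks = int((40 * 1024 * 0.9) // 2) = 18432
# The all_reduce(MIN) above keeps the smallest count across the tp group so
# all ranks allocate identically sized caches.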
def init_cache_engine(self, cache_config: CacheConfig) -> None:
if self.cache_engine is None and self.gpu_cache is None:
self.cache_config = cache_config
self.cache_engine = CacheEngine(self.cache_config, self.model_config, self.parallel_config)
self.cache_events = self.cache_engine.events
self.gpu_cache = self.cache_engine.gpu_cache
self.model_runner.set_block_size(self.cache_engine.block_size)
def free_cache_engine(self):
# ensure `enforce_eager=True`
self.cache_engine = None
self.gpu_cache = None
def warm_up_model(self) -> None:
if not self.model_config.enforce_eager:
self.model_runner.capture_model(self.gpu_cache)
# Reset the seed to ensure that the random state is not affected by
# the model initialization and profiling.
set_random_seed(self.model_config.seed)
def cache_swap(
self,
blocks_to_swap_in: Dict[int, int],
blocks_to_swap_out: Dict[int, int],
blocks_to_copy: Dict[int, List[int]],
) -> None:
# Issue cache operations.
issued_cache_op = False
if blocks_to_swap_in:
self.cache_engine.swap_in(blocks_to_swap_in)
issued_cache_op = True
if blocks_to_swap_out:
self.cache_engine.swap_out(blocks_to_swap_out)
issued_cache_op = True
if blocks_to_copy:
self.cache_engine.copy(blocks_to_copy)
issued_cache_op = True
cache_events = self.cache_events if issued_cache_op else None
# Wait for cache operations to finish.
# TODO(woosuk): Profile swapping overhead and optimize if needed.
if cache_events is not None:
for event in cache_events:
event.wait()
@torch.inference_mode()
def execute_model(
self,
seq_group_metadata_list: List[SequenceGroupMetadata],
blocks_to_swap_in: Dict[int, int],
blocks_to_swap_out: Dict[int, int],
blocks_to_copy: Dict[int, List[int]],
) -> SamplerOutput:
num_seq_groups = len(seq_group_metadata_list)
self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)
# If there is no input, we don't need to execute the model.
if num_seq_groups == 0:
return {}
output = self.model_runner.execute_model(seq_group_metadata_list, self.gpu_cache)
return output
# # Prepare input tensors.
# # NOTE(shengguangming): currently we pad in our dataloader and unpad it in pre_process_input;
# # we could just input the un-padded sequence for better performance
# input_tokens, input_positions, input_metadata = self._prepare_inputs(seq_group_metadata_list)
# # Execute the model.
# output = self.model(
# input_ids=input_tokens,
# positions=input_positions,
# kv_caches=self.gpu_cache,
# input_metadata=input_metadata,
# cache_events=cache_events,
# )
# return output
# assume the input is .state_dict()
def sync_model_weights(self, actor_weights: Dict):
load_weights(actor_weights, self.model_runner.model)
def offload_model_weights(self) -> None:
if self.cpu_model is None:
self.cpu_model = {}
for name, params in self.model_runner.model.named_parameters():
self.cpu_model[name] = torch.empty_like(params, device='cpu')
params.data = self.cpu_model[name]
else:
for name, params in self.model_runner.model.named_parameters():
params.data = self.cpu_model[name]
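# NOTE(editor): illustrative roundtrip, not part of the original file.
# offload_model_weights() re-points every parameter at an uninitialized CPU
# buffer (torch.empty_like), releasing the GPU copies; sync_model_weights()
# later rebinds them to the actor's tensors via the weight loaders, e.g.
#   worker.offload_model_weights()            # GPU weights released
#   worker.sync_model_weights(actor_weights)  # weights restored before rollout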
def add_lora(self, lora_request: LoRARequest) -> bool:
return self.model_runner.add_lora(lora_request)
def remove_lora(self, lora_id: int) -> bool:
return self.model_runner.remove_lora(lora_id)
def list_loras(self) -> Set[int]:
return self.model_runner.list_loras()
def _init_distributed_environment(
parallel_config: ParallelConfig,
rank: int,
distributed_init_method: Optional[str] = None,
) -> None:
"""Initialize the distributed environment."""
if torch.distributed.is_initialized():
print('The distributed environment has been initialized before vLLM')
elif not distributed_init_method:
raise ValueError("distributed_init_method must be set if torch.distributed "
"is not already initialized")
else:
torch.distributed.init_process_group(
backend="nccl",
world_size=parallel_config.world_size,
rank=rank,
# init_method=distributed_init_method,
)
# A small all_reduce for warmup.
torch.distributed.all_reduce(torch.zeros(1).cuda())
# TODO(shengguangming): maybe we should also flag whether Megatron is initialized
if torch.distributed.get_world_size() > 1:
initialize_model_parallel_from_megatron(tensor_model_parallel_size=parallel_config.tensor_parallel_size)
else:
initialize_model_parallel()
def _pad_to_alignment(x: List[int], multiple_of: int, pad: int) -> List[int]:
return x + [pad] * ((-len(x)) % multiple_of)
def _pad_to_max(x: List[int], max_len: int, pad: int) -> List[int]:
return x + [pad] * (max_len - len(x))
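# NOTE(editor): examples, not part of the original file. In Python, (-3) % 4 == 1:
#   _pad_to_alignment([1, 2, 3], multiple_of=4, pad=0) -> [1, 2, 3, 0]
#   _pad_to_max([1, 2, 3], max_len=5, pad=0)           -> [1, 2, 3, 0, 0]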
def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
# Check if the GPU supports the dtype.
if torch_dtype == torch.bfloat16:
compute_capability = torch.cuda.get_device_capability()
if compute_capability[0] < 8:
gpu_name = torch.cuda.get_device_name()
raise ValueError("Bfloat16 is only supported on GPUs with compute capability "
f"of at least 8.0. Your {gpu_name} GPU has compute capability "
f"{compute_capability[0]}.{compute_capability[1]}.")
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/arg_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/arg_utils.py
import os
import argparse
import dataclasses
from dataclasses import dataclass
from typing import List, Optional, Union
import torch.nn as nn
from transformers import PretrainedConfig
from .config import ModelConfig, LoadConfig
from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, EngineConfig, LoRAConfig, ParallelConfig,
SchedulerConfig, SpeculativeConfig, TokenizerPoolConfig, VisionLanguageConfig)
from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
from vllm.utils import str_to_int_tuple
def nullable_str(val: str):
if not val or val == "None":
return None
return val
@dataclass
class EngineArgs:
"""Arguments for vLLM engine."""
model_hf_config: PretrainedConfig = None
skip_tokenizer_init: bool = False
served_model_name: Optional[Union[str, List[str]]] = None # TODO
download_dir: Optional[str] = None
load_format: str = 'auto'
dtype: str = 'auto'
kv_cache_dtype: str = 'auto'
quantization_param_path: Optional[str] = None
seed: int = 0
max_model_len: Optional[int] = None
worker_use_ray: bool = False
pipeline_parallel_size: int = 1
tensor_parallel_size: int = 1
max_parallel_loading_workers: Optional[int] = None
block_size: int = 16
enable_prefix_caching: bool = False
use_v2_block_manager: bool = False
swap_space: int = 4 # GiB
gpu_memory_utilization: float = 0.90
max_num_batched_tokens: Optional[int] = None
max_num_seqs: int = 256
max_logprobs: int = 5 # OpenAI default value
disable_log_stats: bool = False
revision: Optional[str] = None
code_revision: Optional[str] = None
tokenizer_revision: Optional[str] = None
quantization: Optional[str] = None
enforce_eager: bool = False
max_context_len_to_capture: Optional[int] = None
max_seq_len_to_capture: int = 8192
disable_custom_all_reduce: bool = False
tokenizer_pool_size: int = 0
tokenizer_pool_type: str = "ray"
tokenizer_pool_extra_config: Optional[dict] = None
enable_lora: bool = False
max_loras: int = 1
max_lora_rank: int = 16
fully_sharded_loras: bool = False
lora_extra_vocab_size: int = 256
lora_dtype = 'auto'
max_cpu_loras: Optional[int] = None
device: str = 'auto'
ray_workers_use_nsight: bool = False
num_gpu_blocks_override: Optional[int] = None
num_lookahead_slots: int = 0
model_loader_extra_config: Optional[dict] = None
# Related to Vision-language models such as llava
image_input_type: Optional[str] = None
image_token_id: Optional[int] = None
image_input_shape: Optional[str] = None
image_feature_size: Optional[int] = None
scheduler_delay_factor: float = 0.0
enable_chunked_prefill: bool = False
guided_decoding_backend: str = 'outlines'
# Speculative decoding configuration.
speculative_model: Optional[str] = None
num_speculative_tokens: Optional[int] = None
speculative_max_model_len: Optional[int] = None
ngram_prompt_lookup_max: Optional[int] = None
ngram_prompt_lookup_min: Optional[int] = None
@staticmethod
def add_cli_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Shared CLI arguments for vLLM engine."""
# Model arguments
# TODO(shengguangming): delete the unused args
parser.add_argument('--model',
type=str,
default='facebook/opt-125m',
help='name or path of the huggingface model to use')
parser.add_argument('--tokenizer',
type=str,
default=None,  # NOTE: this trimmed EngineArgs dataclass has no `tokenizer` field
help='name or path of the huggingface tokenizer to use')
parser.add_argument('--revision',
type=str,
default=None,
help='the specific model version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
parser.add_argument('--tokenizer-revision',
type=str,
default=None,
help='the specific tokenizer version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
parser.add_argument('--tokenizer-mode',
type=str,
default='auto',  # NOTE: this trimmed EngineArgs dataclass has no `tokenizer_mode` field
choices=['auto', 'slow'],
help='tokenizer mode. "auto" will use the fast '
'tokenizer if available, and "slow" will '
'always use the slow tokenizer.')
parser.add_argument('--trust-remote-code', action='store_true', help='trust remote code from huggingface')
parser.add_argument('--download-dir',
type=str,
default=EngineArgs.download_dir,
help='directory to download and load the weights, '
'default to the default cache dir of '
'huggingface')
parser.add_argument('--load-format',
type=str,
default=EngineArgs.load_format,
choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
help='The format of the model weights to load. '
'"auto" will try to load the weights in the safetensors format '
'and fall back to the pytorch bin format if safetensors format '
'is not available. '
'"pt" will load the weights in the pytorch bin format. '
'"safetensors" will load the weights in the safetensors format. '
'"npcache" will load the weights in pytorch format and store '
'a numpy cache to speed up the loading. '
'"dummy" will initialize the weights with random values, '
'which is mainly for profiling.')
parser.add_argument('--dtype',
type=str,
default=EngineArgs.dtype,
choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
help='data type for model weights and activations. '
'The "auto" option will use FP16 precision '
'for FP32 and FP16 models, and BF16 precision '
'for BF16 models.')
parser.add_argument('--max-model-len',
type=int,
default=None,
help='model context length. If unspecified, '
'will be automatically derived from the model.')
# Parallel arguments
parser.add_argument('--worker-use-ray',
action='store_true',
help='use Ray for distributed serving, will be '
'automatically set when using more than 1 GPU')
parser.add_argument('--pipeline-parallel-size',
'-pp',
type=int,
default=EngineArgs.pipeline_parallel_size,
help='number of pipeline stages')
parser.add_argument('--tensor-parallel-size',
'-tp',
type=int,
default=EngineArgs.tensor_parallel_size,
help='number of tensor parallel replicas')
# KV cache arguments
parser.add_argument('--block-size',
type=int,
default=EngineArgs.block_size,
choices=[8, 16, 32],
help='token block size')
# TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
parser.add_argument('--seed', type=int, default=EngineArgs.seed, help='random seed')
parser.add_argument('--swap-space',
type=int,
default=EngineArgs.swap_space,
help='CPU swap space size (GiB) per GPU')
parser.add_argument('--gpu-memory-utilization',
type=float,
default=EngineArgs.gpu_memory_utilization,
help='the percentage of GPU memory to be used for '
'the model executor')
parser.add_argument('--max-num-batched-tokens',
type=int,
default=EngineArgs.max_num_batched_tokens,
help='maximum number of batched tokens per '
'iteration')
parser.add_argument('--max-num-seqs',
type=int,
default=EngineArgs.max_num_seqs,
help='maximum number of sequences per iteration')
parser.add_argument('--disable-log-stats', action='store_true', help='disable logging statistics')
# Quantization settings.
parser.add_argument('--quantization',
'-q',
type=str,
choices=['awq', None],
default=None,
help='Method used to quantize the weights')
return parser
@classmethod
def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
# Get the list of attributes of this dataclass.
attrs = [attr.name for attr in dataclasses.fields(cls)]
# Set the attributes from the parsed arguments.
engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
return engine_args
def create_engine_config(
self,
) -> EngineConfig:
device_config = DeviceConfig(self.device)
# NOTE(sgm): we only modify ModelConfig, other configs are import from vllm
model_config = ModelConfig(self.model_hf_config, self.dtype, self.seed, self.revision, self.code_revision,
self.tokenizer_revision, self.max_model_len, self.quantization,
self.quantization_param_path, self.enforce_eager, self.max_context_len_to_capture,
self.max_seq_len_to_capture, self.max_logprobs, self.skip_tokenizer_init,
self.served_model_name)
cache_config = CacheConfig(self.block_size, self.gpu_memory_utilization,
self.swap_space, self.kv_cache_dtype, self.num_gpu_blocks_override,
model_config.get_sliding_window(), self.enable_prefix_caching)
parallel_config = ParallelConfig(
self.pipeline_parallel_size, self.tensor_parallel_size, self.worker_use_ray,
self.max_parallel_loading_workers, self.disable_custom_all_reduce,
TokenizerPoolConfig.create_config(
self.tokenizer_pool_size,
self.tokenizer_pool_type,
self.tokenizer_pool_extra_config,
), self.ray_workers_use_nsight)
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
parallel_config.world_size = world_size
# TODO: spec config
speculative_config = SpeculativeConfig.maybe_create_spec_config(
target_model_config=model_config,
target_parallel_config=parallel_config,
target_dtype=self.dtype,
speculative_model=self.speculative_model,
num_speculative_tokens=self.num_speculative_tokens,
speculative_max_model_len=self.speculative_max_model_len,
enable_chunked_prefill=self.enable_chunked_prefill,
use_v2_block_manager=self.use_v2_block_manager,
ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
)
scheduler_config = SchedulerConfig(
self.max_num_batched_tokens,
self.max_num_seqs,
model_config.max_model_len,
self.use_v2_block_manager,
num_lookahead_slots=(self.num_lookahead_slots
if speculative_config is None else speculative_config.num_lookahead_slots),
delay_factor=self.scheduler_delay_factor,
enable_chunked_prefill=self.enable_chunked_prefill,
)
lora_config = LoRAConfig(max_lora_rank=self.max_lora_rank,
max_loras=self.max_loras,
fully_sharded_loras=self.fully_sharded_loras,
lora_extra_vocab_size=self.lora_extra_vocab_size,
lora_dtype=self.lora_dtype,
max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras and self.max_cpu_loras > 0 else
None) if self.enable_lora else None
load_config = LoadConfig(
load_format=self.load_format,
download_dir=self.download_dir,
model_loader_extra_config=self.model_loader_extra_config,
)
if self.image_input_type:
if (not self.image_token_id or not self.image_input_shape or not self.image_feature_size):
raise ValueError('Specify `image_token_id`, `image_input_shape` and '
'`image_feature_size` together with `image_input_type`.')
vision_language_config = VisionLanguageConfig(
image_input_type=VisionLanguageConfig.get_image_input_enum_type(self.image_input_type),
image_token_id=self.image_token_id,
image_input_shape=str_to_int_tuple(self.image_input_shape),
image_feature_size=self.image_feature_size,
)
else:
vision_language_config = None
decoding_config = DecodingConfig(guided_decoding_backend=self.guided_decoding_backend)
return EngineConfig(model_config=model_config,
cache_config=cache_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
device_config=device_config,
lora_config=lora_config,
vision_language_config=vision_language_config,
speculative_config=speculative_config,
load_config=load_config,
decoding_config=decoding_config)
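# NOTE(editor): illustrative usage sketch, not part of the original file.
# This fork builds configs from an HF config object instead of a model path,
# and reads WORLD_SIZE from the torchrun-provided environment:
#   from transformers import AutoConfig
#   hf_config = AutoConfig.from_pretrained('Qwen/Qwen2.5-Math-1.5B')  # example model
#   engine_args = EngineArgs(model_hf_config=hf_config, tensor_parallel_size=2)
#   engine_config = engine_args.create_engine_config()  # requires WORLD_SIZE to be set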
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/config.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/config.py
import enum
import json
from typing import List, Optional, Union
from dataclasses import dataclass, field, fields
from transformers import PretrainedConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization import get_quantization_config
from vllm.transformers_utils.config import get_hf_text_config
from vllm.utils import is_hip
# Add for verl
from vllm.config import ModelConfig, _get_and_verify_dtype, _get_and_verify_max_len
GPTQMarlinConfig = get_quantization_config("gptq_marlin")
logger = init_logger(__name__)
_GB = 1 << 30
class ModelConfig(ModelConfig):
"""Configuration for the model.
Args:
model: Name or path of the huggingface model to use.
tokenizer: Name or path of the huggingface tokenizer to use.
tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
dtype: Data type for model weights and activations. The "auto" option
will use FP16 precision for FP32 and FP16 models, and BF16 precision
for BF16 models.
seed: Random seed for reproducibility.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id. If unspecified, will use the default
version.
code_revision: The specific revision to use for the model code on
Hugging Face Hub. It can be a branch name, a tag name, or a
commit id. If unspecified, will use the default version.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id. If unspecified, will use
the default version.
max_model_len: Maximum length of a sequence (including prompt and
output). If None, will be derived from the model.
quantization: Quantization method that was used to quantize the model
weights. If None, we assume the model weights are not quantized.
quantization_param_path: Path to JSON file containing scaling factors.
Used to load KV cache scaling factors into the model when KV cache
type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also
be used to load activation and weight scaling factors when the
model dtype is FP8_E4M3 on ROCm.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode (DEPRECATED. Use max_seq_len_to_capture instead).
max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode
skip_tokenizer_init: If true, skip initialization of tokenizer and
detokenizer.
served_model_name: The model name used in metrics tag `model_name`,
matches the model name exposed via the APIs. If multiple model
names provided, the first name will be used. If not specified,
the model name will be the same as `model`.
"""
def __init__(
self,
hf_config: PretrainedConfig,
dtype: str,
seed: int,
revision: Optional[str] = None,
code_revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
max_model_len: Optional[int] = None,
quantization: Optional[str] = None,
quantization_param_path: Optional[str] = None,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
max_seq_len_to_capture: Optional[int] = None,
max_logprobs: int = 5,
skip_tokenizer_init: bool = False,
served_model_name: Optional[Union[str, List[str]]] = None,
) -> None:
self.model = hf_config._name_or_path
self.tokenizer = hf_config._name_or_path
self.seed = seed
self.revision = revision
self.code_revision = code_revision
self.tokenizer_revision = tokenizer_revision
self.quantization = quantization
self.quantization_param_path = quantization_param_path
self.enforce_eager = enforce_eager
self.max_context_len_to_capture = max_context_len_to_capture
if self.max_context_len_to_capture is not None:
raise ValueError("`max_context_len_to_capture` is deprecated. "
"Use `max_seq_len_to_capture` instead.")
self.max_seq_len_to_capture = (max_seq_len_to_capture or max_context_len_to_capture)
self.max_logprobs = max_logprobs
self.skip_tokenizer_init = skip_tokenizer_init
# self.hf_config = get_config(model, trust_remote_code, revision)
self.hf_config = hf_config
self.hf_text_config = get_hf_text_config(hf_config)
# TODO: for multimodal model
self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
self.max_model_len = _get_and_verify_max_len(self.hf_config, max_model_len)
# self.served_model_name = get_served_model_name(model,
# served_model_name)
# self._verify_load_format()
# self._verify_tokenizer_mode()
self._verify_quantization()
self._verify_cuda_graph()
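# --- Usage sketch (illustrative, not part of the original file) ---
# Unlike upstream vLLM, this ModelConfig is constructed from an already-loaded
# HF config rather than a model name/path. The model id below is a placeholder
# chosen for illustration, not something this file prescribes.
#
#   from transformers import AutoConfig
#   hf_config = AutoConfig.from_pretrained("Qwen/Qwen2-1.5B-Instruct")
#   model_config = ModelConfig(hf_config=hf_config, dtype="bfloat16", seed=0)
#   # max_model_len is derived from the HF config when not given explicitly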
class LoadFormat(str, enum.Enum):
AUTO = 'auto'
MEGATRON = "megatron"
HF = "hf"
DTENSOR = 'dtensor'
DUMMY_HF = 'dummy_hf'
DUMMY_MEGATRON = 'dummy_megatron'
DUMMY_DTENSOR = 'dummy_dtensor'
@dataclass
class LoadConfig:
"""
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
"tensorizer" will use CoreWeave's tensorizer library for
fast weight loading.
"""
load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
download_dir: Optional[str] = None
model_loader_extra_config: Optional[Union[str, dict]] = field(default_factory=dict)
def __post_init__(self):
model_loader_extra_config = self.model_loader_extra_config or {}
if isinstance(model_loader_extra_config, str):
self.model_loader_extra_config = json.loads(model_loader_extra_config)
self._verify_load_format()
def _verify_load_format(self) -> None:
if not isinstance(self.load_format, str):
return
load_format = self.load_format.lower()
self.load_format = LoadFormat(load_format)
rocm_not_supported_load_format: List[str] = []
if is_hip() and load_format in rocm_not_supported_load_format:
rocm_supported_load_format = [
f for f in LoadFormat.__members__ if (f not in rocm_not_supported_load_format)
]
raise ValueError(f"load format '{load_format}' is not supported in ROCm. "
f"Supported load formats are "
f"{rocm_supported_load_format}")
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/dtensor_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict, Iterable, Tuple
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor, Shard, Replicate
from vllm.model_executor.layers.linear import *
from vllm.model_executor.models import ModelRegistry
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
for (param_name, shard_name, shard_id) in stacked_params_mapping:
if shard_name not in name:
continue
stacked_name = name.replace(shard_name, param_name)
# Skip loading extra bias for GPTQ models.
if stacked_name.endswith(".bias") and stacked_name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[stacked_name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# lm_head is not used in vllm as it is tied with embed_token.
# To prevent errors, skip loading lm_head.weight.
if "lm_head.weight" in name:
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
# GemmaRMSNorm is different from Llama's in that it multiplies
# (1 + weight) to the output, instead of just weight.
if "norm.weight" in name:
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
norm_weight = local_loaded_weight + 1.0
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, norm_weight.to(dtype=param.dtype))
else:
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gptbigcode_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
continue
if ".attn.bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def starcoder2_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def llama_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
(".qkv_proj", ".q_proj", "q"),
(".qkv_proj", ".k_proj", "k"),
(".qkv_proj", ".v_proj", "v"),
(".gate_up_proj", ".gate_proj", 0),
(".gate_up_proj", ".up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if ("rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name):
# Models trained using ColossalAI may include these tensors in
# the checkpoint. Skip them.
continue
# With tie_word_embeddings, we can skip lm_head.weight
# The weight might appear unnecessarily in the files if the model is
# processed with quantization, LoRA, fine-tuning, etc.
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight)
def qwen2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gpt2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
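# NOTE: empty placeholder; DTensor weight loading for GPT2 is not implemented
# in this version, so GPT2 weights are silently skipped.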
pass
def redistribute_dtensor(param_name: str, loaded_weights: DTensor, parallelize_plan: Dict = None):
param_name = _process_parameter_names(name=param_name)
if parallelize_plan is not None:
assert param_name in parallelize_plan.keys(), \
f"param name: {param_name} not in parallelize_plan :{parallelize_plan.keys()}"
placement = parallelize_plan[param_name]
local_loaded_weights = loaded_weights.redistribute(device_mesh=loaded_weights.device_mesh,
placements=placement).to_local()
else:
local_loaded_weights = loaded_weights.full_tensor()
return local_loaded_weights
def _process_parameter_names(name):
# Remove '.weight' if it exists at the end of the string
if name.endswith(".weight"):
name = name[:-7]
# Remove 'model.layers.x.' or 'model.' prefix
if "model.layers" in name:
parts = name.split('.')
# Reconstruct the string without 'model.layers.x.'
name = '.'.join(parts[3:]) # parts[0] is 'model', parts[1] is 'layers', parts[2] is 'x'
elif name.startswith("model."):
name = name[6:] # Remove 'model.'
return name
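# Doctest-style examples for _process_parameter_names (illustrative):
#   _process_parameter_names("model.layers.0.self_attn.qkv_proj.weight")
#   -> "self_attn.qkv_proj"
#   _process_parameter_names("model.embed_tokens.weight")
#   -> "embed_tokens"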
__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
'GPT2LMHeadModel': gpt2_dtensor_weight_loader,
'LlamaForCausalLM': llama_dtensor_weight_loader,
'LLaMAForCausalLM': llama_dtensor_weight_loader,
'MistralForCausalLM': llama_dtensor_weight_loader, # mistral is the same as llama in vLLM
'InternLMForCausalLM': llama_dtensor_weight_loader,
'AquilaModel': llama_dtensor_weight_loader,
'AquilaForCausalLM': llama_dtensor_weight_loader,
'Phi3ForCausalLM': llama_dtensor_weight_loader,
'GemmaForCausalLM': gemma_dtensor_weight_loader,
'GPTBigCodeForCausalLM': gptbigcode_dtensor_load_weights,
'Starcoder2ForCausalLM': starcoder2_dtensor_load_weights,
'Qwen2ForCausalLM': qwen2_dtensor_weight_loader
}
# the actor model is .state_dict()
# Load dtensor weights
def load_dtensor_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
# NOTE(sgm): to reduce peak memory usage, we offload the vllm model to CPU
# after init, so we need to move it back to GPU after syncing model weights in the first iteration.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__:
return __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__.keys()}")
# NOTE(sgm): we use a per-parameter weight loader in each vllm submodule
def update_dtensor_weight_loader():
pass
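# --- Usage sketch (illustrative; variable names are assumptions) ---
# The actor's state_dict is expected to hold DTensors; without a
# parallelize_plan, redistribute_dtensor gathers each one into a full tensor
# before handing it to the per-parameter weight loader:
#
#   actor_weights = fsdp_actor_module.state_dict()   # Dict[str, DTensor]
#   load_dtensor_weights(actor_weights, vllm_model)  # dispatches on vllm_model's class name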
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/hf_weight_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict, Union, Optional, Iterable, Tuple
import torch
import torch.nn as nn
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
def update_hf_weight_loader():
from vllm.model_executor.models.gemma import GemmaForCausalLM
GemmaForCausalLM.load_weights = gemma_load_weights
def gemma_load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(self.named_parameters())
loaded_params = set()
for name, loaded_weight in weights:
for (param_name, shard_name, shard_id) in stacked_params_mapping:
if shard_name not in name:
continue
name = name.replace(shard_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, loaded_weight, shard_id)
break
else:
# lm_head is not used in vllm as it is tied with embed_token.
# To prevent errors, skip loading lm_head.weight.
if "lm_head.weight" in name:
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
# GemmaRMSNorm is different from Llama's in that it multiplies
# (1 + weight) to the output, instead of just weight.
if "norm.weight" in name:
norm_weight = loaded_weight + 1.0 # prevent inplace modify actor weights
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, norm_weight)
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
loaded_params.add(name)
unloaded_params = params_dict.keys() - loaded_params
if unloaded_params:
raise RuntimeError("Some weights are not initialized from checkpoints: "
f"{unloaded_params}")
def load_hf_weights(actor_weights: Dict, vllm_model: nn.Module):
assert isinstance(actor_weights, Dict)
with set_default_torch_dtype(next(vllm_model.parameters()).dtype): # TODO
vllm_model.load_weights(actor_weights.items())
for _, module in vllm_model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
vllm_model = vllm_model.cuda()
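# --- Usage sketch (illustrative; variable names are assumptions) ---
#   update_hf_weight_loader()                    # patch Gemma's load_weights first
#   actor_weights = hf_actor_model.state_dict()  # plain torch tensors
#   load_hf_weights(actor_weights, vllm_model)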
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/llm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py
from typing import Dict, List, Optional, Tuple, Union
from tqdm import tqdm
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers import PretrainedConfig
import torch.nn as nn
from .arg_utils import EngineArgs
from .llm_engine_sp import LLMEngine
from vllm.lora.request import LoRARequest
from vllm.outputs import RequestOutput
from vllm.sampling_params import SamplingParams
from vllm.sequence import MultiModalData
from vllm.usage.usage_lib import UsageContext
from vllm.utils import Counter
import torch
from torch.nn.utils.rnn import pad_sequence
from verl.workers.rollout.tokenizer import HybridEngineBaseTokenizer
class LLM:
"""An LLM for generating texts from given prompts and sampling parameters.
This class includes a tokenizer, a language model (possibly distributed
across multiple GPUs), and GPU memory space allocated for intermediate
states (aka KV cache). Given a batch of prompts and sampling parameters,
this class generates texts from the model, using an intelligent batching
mechanism and efficient memory management.
NOTE: This class is intended to be used for offline inference. For online
serving, use the `AsyncLLMEngine` class instead.
NOTE: For the comprehensive list of arguments, see `EngineArgs`.
Args:
model: A HuggingFace Transformers model instance, or its parameter dict.
tokenizer: A HuggingFace Transformers tokenizer instance.
tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
if available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
tensor_parallel_size: The number of GPUs to use for distributed
execution with tensor parallelism.
dtype: The data type for the model weights and activations. Currently,
we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
the `torch_dtype` attribute specified in the model config file.
However, if the `torch_dtype` in the config is `float32`, we will
use `float16` instead.
quantization: The method used to quantize the model weights. Currently,
we support "awq". If None, we assume the model weights are not
quantized and use `dtype` to determine the data type of the weights.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id.
seed: The seed to initialize the random number generator for sampling.
gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
reserve for the model weights, activations, and KV cache. Higher
values will increase the KV cache size and thus improve the model's
throughput. However, if the value is too high, it may cause out-of-
memory (OOM) errors.
swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
This can be used for temporarily storing the states of the requests
when their `best_of` sampling parameters are larger than 1. If all
requests will have `best_of=1`, you can safely set this to 0.
Otherwise, too small values may cause out-of-memory (OOM) errors.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode.
disable_custom_all_reduce: See ParallelConfig
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer],
model_hf_config: PretrainedConfig,
tokenizer_mode: str = "auto",
trust_remote_code: bool = False,
tensor_parallel_size: int = 1,
dtype: str = "auto",
quantization: Optional[str] = None,
revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
seed: int = 0,
gpu_memory_utilization: float = 0.9,
swap_space: int = 4,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
disable_custom_all_reduce: bool = False,
load_format: str = 'auto',
**kwargs,
) -> None:
if "disable_log_stats" not in kwargs:
kwargs["disable_log_stats"] = True
engine_args = EngineArgs(
model_hf_config=model_hf_config,
tensor_parallel_size=tensor_parallel_size,
dtype=dtype,
quantization=quantization,
revision=revision,
tokenizer_revision=tokenizer_revision,
seed=seed,
gpu_memory_utilization=gpu_memory_utilization,
swap_space=swap_space,
enforce_eager=enforce_eager,
max_context_len_to_capture=max_context_len_to_capture,
disable_custom_all_reduce=disable_custom_all_reduce,
load_format=load_format,
**kwargs,
)
tokenizer_cls = (PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer)
if not isinstance(tokenizer, tokenizer_cls):
raise ValueError(
f"Unexpected tokenizer type: {type(tokenizer)}. Must be"
"one of the following: PreTrainedTokenizer, PreTrainedTokenizerFast, verl.workers.rollout.HybridEngineBaseTokenizer"
)
self.llm_engine = LLMEngine.from_engine_args(model, tokenizer, engine_args)
self.request_counter = Counter()
def init_cache_engine(self):
self.llm_engine.init_cache_engine()
def free_cache_engine(self):
self.llm_engine.free_cache_engine()
def get_tokenizer(self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
return self.llm_engine.tokenizer
def set_tokenizer(
self,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
) -> None:
self.llm_engine.tokenizer = tokenizer
def generate(
self,
prompts: Optional[Union[str, List[str]]] = None,
sampling_params: Optional[Union[SamplingParams, List[SamplingParams]]] = None,
prompt_token_ids: Optional[List[List[int]]] = None,
use_tqdm: bool = True,
lora_request: Optional[LoRARequest] = None,
multi_modal_data: Optional[MultiModalData] = None,
) -> List[RequestOutput]:
"""Generates the completions for the input prompts.
NOTE: This class automatically batches the given prompts, considering
the memory constraint. For the best performance, put all of your prompts
into a single list and pass it to this method.
Args:
prompts: A list of prompts to generate completions for.
sampling_params: The sampling parameters for text generation. If
None, we use the default sampling parameters.
When it is a single value, it is applied to every prompt.
When it is a list, the list must have the same length as the
prompts and it is paired one by one with the prompt.
prompt_token_ids: A list of token IDs for the prompts. If None, we
use the tokenizer to convert the prompts to token IDs.
use_tqdm: Whether to use tqdm to display the progress bar.
lora_request: LoRA request to use for generation, if any.
multi_modal_data: Multi modal data.
Returns:
A list of `RequestOutput` objects containing the generated
completions in the same order as the input prompts.
"""
if prompts is None and prompt_token_ids is None:
raise ValueError("Either prompts or prompt_token_ids must be "
"provided.")
if self.llm_engine.model_config.skip_tokenizer_init \
and prompts is not None:
raise ValueError("prompts must be None if skip_tokenizer_init "
"is True")
if isinstance(prompts, str):
# Convert a single prompt to a list.
prompts = [prompts]
if (prompts is not None and prompt_token_ids is not None and len(prompts) != len(prompt_token_ids)):
raise ValueError("The lengths of prompts and prompt_token_ids "
"must be the same.")
if prompts is not None:
num_requests = len(prompts)
else:
assert prompt_token_ids is not None
num_requests = len(prompt_token_ids)
if sampling_params is None:
# Use default sampling params.
sampling_params = SamplingParams()
elif isinstance(sampling_params, list) and len(sampling_params) != num_requests:
raise ValueError("The lengths of prompts and sampling_params "
"must be the same.")
if multi_modal_data:
multi_modal_data.data = multi_modal_data.data.to(torch.float16)
# Add requests to the engine.
for i in range(num_requests):
prompt = prompts[i] if prompts is not None else None
token_ids = None if prompt_token_ids is None else prompt_token_ids[i]
if not isinstance(token_ids, list):
# NOTE(shengguangming): convert the rollout input (a padded id tensor) into List[int]
token_ids = self._pre_process_inputs(token_ids)
self._add_request(
prompt,
sampling_params[i] if isinstance(sampling_params, list) else sampling_params,
token_ids,
lora_request=lora_request,
# Get ith image while maintaining the batch dim.
multi_modal_data=MultiModalData(type=multi_modal_data.type, data=multi_modal_data.data[i].unsqueeze(0))
if multi_modal_data else None,
)
return self._run_engine(use_tqdm)
def _add_request(
self,
prompt: Optional[str],
sampling_params: SamplingParams,
prompt_token_ids: Optional[List[int]],
lora_request: Optional[LoRARequest] = None,
multi_modal_data: Optional[MultiModalData] = None,
) -> None:
request_id = str(next(self.request_counter))
self.llm_engine.add_request(request_id,
prompt,
sampling_params,
prompt_token_ids,
lora_request=lora_request,
multi_modal_data=multi_modal_data)
def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]:
# Initialize tqdm.
if use_tqdm:
num_requests = self.llm_engine.get_num_unfinished_requests()
pbar = tqdm(total=num_requests, desc="Processed prompts", dynamic_ncols=True)
# Run the engine.
outputs: List[RequestOutput] = []
while self.llm_engine.has_unfinished_requests():
step_outputs = self.llm_engine.step()
for output in step_outputs:
if output.finished:
outputs.append(output)
if use_tqdm:
pbar.update(1)
if use_tqdm:
pbar.close()
# Sort the outputs by request ID.
# This is necessary because some requests may finish earlier than
# requests that were submitted before them.
outputs = sorted(outputs, key=lambda x: int(x.request_id))
# TODO(shengguangming): maybe we can hook into the autoregressive generation logic itself, rather than only applying post-processing, for better performance
return self._post_process_outputs(outputs)
# NOTE(shengguangming): add for verl
# TODO(sgm): we can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(self, prompt_token_ids: torch.Tensor) -> List[int]:
# remove the left padding in the prompt token_id
pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
token_ids = prompt_token_ids[non_pad_index:].tolist()
return token_ids
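# Example (illustrative): with pad_token_id == 0 and
# prompt_token_ids == torch.tensor([0, 0, 5, 6]), the first non-pad index
# is 2, so the returned ids are [5, 6].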
# NOTE(shengguangming): add for verl
def _post_process_outputs(self, request_outputs: List[RequestOutput]) -> Tuple[torch.Tensor, torch.Tensor]:
output_token_ids = []
logprobs = []
for request_output in request_outputs: # List[RequestOutput]
outputs = request_output.outputs
for output in outputs: # List[CompletionOutput], usually len == 1
output_token_ids.append(torch.tensor(output.token_ids))
# TODO(shengguangming): can be optimized by rewriting the Sampler._get_logprobs() logic
logprobs_dicts = output.logprobs
if logprobs_dicts is not None:
logprob = []
for logprobs_dict, id in zip(logprobs_dicts, output.token_ids):
logprob.append(logprobs_dict[id].logprob)
logprobs.append(torch.tensor(logprob))
pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
output_token_ids = pad_sequence(output_token_ids, batch_first=True, padding_value=pad_token_id)
if len(logprobs) > 0:
logprobs = pad_sequence(logprobs, batch_first=True, padding_value=pad_token_id)
return output_token_ids, logprobs
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.llm_engine.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.llm_engine.offload_model_weights()
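# --- Usage sketch (illustrative; the model id and sizes are assumptions) ---
#   from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
#   name = "Qwen/Qwen2-1.5B-Instruct"
#   actor = AutoModelForCausalLM.from_pretrained(name)
#   tok = AutoTokenizer.from_pretrained(name)
#   llm = LLM(model=actor, tokenizer=tok,
#             model_hf_config=AutoConfig.from_pretrained(name),
#             tensor_parallel_size=1, dtype="bfloat16", load_format="hf")
#   out_ids, logprobs = llm.generate(prompt_token_ids=[[1, 2, 3]],
#                                    sampling_params=SamplingParams(max_tokens=16))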
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/llm_engine_sp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/llm_engine.py
import torch
from typing import Dict, Optional, Union, Type
import vllm
from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, LoRAConfig, ParallelConfig, SchedulerConfig,
SpeculativeConfig, VisionLanguageConfig)
from vllm.core.scheduler import Scheduler
from vllm.engine.output_processor.interfaces import (SequenceGroupOutputProcessor)
from vllm.engine.output_processor.stop_checker import StopChecker
from vllm.executor.executor_base import ExecutorBase
from vllm.logger import init_logger
from vllm.transformers_utils.detokenizer import Detokenizer
from vllm.engine.metrics import StatLogger
from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, usage_message)
from vllm.utils import Counter
from vllm.engine.llm_engine import _load_generation_config_dict
from vllm.engine.llm_engine import LLMEngine
import torch.nn as nn
from .arg_utils import EngineArgs
from .tokenizer import TokenizerGroup
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
_LOCAL_LOGGING_INTERVAL_SEC = 5
class LLMEngine(LLMEngine):
"""An LLM engine that receives requests and generates texts.
This is the main class for the vLLM engine. It receives requests
from clients and generates texts from the LLM. It includes a tokenizer, a
language model (possibly distributed across multiple GPUs), and GPU memory
space allocated for intermediate states (aka KV cache). This class utilizes
iteration-level scheduling and efficient memory management to maximize the
serving throughput.
The `LLM` class wraps this class for offline batched inference and the
`AsyncLLMEngine` class wraps this class for online serving.
NOTE: The config arguments are derived from the `EngineArgs` class. For the
comprehensive list of arguments, see `EngineArgs`.
Args:
model: the actor model initialize outside vllm (add for verl)
tokenizer: the initialized tokenizer (add for verl)
model_config: The configuration related to the LLM model.
cache_config: The configuration related to the KV cache memory
management.
parallel_config: The configuration related to distributed execution.
scheduler_config: The configuration related to the request scheduler.
distributed_init_method: The initialization method for distributed
execution. See `torch.distributed.init_process_group` for details.
placement_group: Ray placement group for distributed execution.
Required for distributed execution.
log_stats: Whether to log statistics.
"""
def __init__(
self,
# NOTE(sgm): first two arguments are added for verl
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: nn.Module,
# NOTE(sgm): vllm original arguments
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
vision_language_config: Optional[VisionLanguageConfig],
speculative_config: Optional[SpeculativeConfig],
decoding_config: Optional[DecodingConfig],
executor_class: Type[ExecutorBase],
log_stats: bool,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
) -> None:
logger.info(
"Initializing an LLM engine (v%s) with config: "
"model=%r, speculative_config=%r, tokenizer=%r, "
"skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, "
"tokenizer_revision=%s, trust_remote_code=%s, dtype=%s, "
"max_seq_len=%d, download_dir=%r, load_format=%s, "
"tensor_parallel_size=%d, disable_custom_all_reduce=%s, "
"quantization=%s, enforce_eager=%s, kv_cache_dtype=%s, "
"quantization_param_path=%s, device_config=%s, "
"decoding_config=%r, seed=%d, served_model_name=%s)",
vllm.__version__,
model_config.model,
speculative_config,
model_config.tokenizer,
model_config.skip_tokenizer_init,
# model_config.tokenizer_mode,
model_config.revision,
model_config.tokenizer_revision,
# model_config.trust_remote_code,
model_config.dtype,
model_config.max_model_len,
load_config.download_dir,
load_config.load_format,
parallel_config.tensor_parallel_size,
parallel_config.disable_custom_all_reduce,
model_config.quantization,
model_config.enforce_eager,
cache_config.cache_dtype,
model_config.quantization_param_path,
device_config.device,
decoding_config,
model_config.seed,
# model_config.served_model_name,
)
# TODO(woosuk): Print more configs in debug mode.
self.model_config = model_config # TODO: currently this is the hf config
self.cache_config = cache_config
self.lora_config = lora_config
self.vision_language_config = vision_language_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.speculative_config = speculative_config
self.load_config = load_config
self.decoding_config = decoding_config or DecodingConfig()
self.log_stats = log_stats
# self.model = model # should not store the model, it should be deleted
# TODO(shengguangming): maybe we can choose init here or from arguments
if not self.model_config.skip_tokenizer_init:
# TODO: check tokenizer class
self._init_tokenizer(tokenizer)
self.detokenizer = Detokenizer(self.tokenizer)
else:
self.detokenizer = None
self.tokenizer = None
self.seq_counter = Counter()
# TODO: unclear what this is used for
self.generation_config_fields = _load_generation_config_dict(model_config)
self.model_executor = executor_class(
model=model, # add for spmd_gpu_executor
model_config=model_config,
cache_config=cache_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
device_config=device_config,
lora_config=lora_config,
vision_language_config=vision_language_config,
speculative_config=speculative_config,
load_config=load_config,
)
# Profile the memory usage and initialize the cache.
self._initialize_kv_caches()
# If usage stat is enabled, collect relevant info.
if is_usage_stats_enabled():
from vllm.model_executor.model_loader import (get_architecture_class_name)
usage_message.report_usage(
get_architecture_class_name(model_config),
usage_context,
extra_kvs={
# Common configuration
"dtype": str(model_config.dtype),
"tensor_parallel_size": parallel_config.tensor_parallel_size,
"block_size": cache_config.block_size,
"gpu_memory_utilization": cache_config.gpu_memory_utilization,
# Quantization
"quantization": model_config.quantization,
"kv_cache_dtype": cache_config.cache_dtype,
# Feature flags
"enable_lora": bool(lora_config),
"enable_prefix_caching": cache_config.enable_prefix_caching,
"enforce_eager": model_config.enforce_eager,
"disable_custom_all_reduce": parallel_config.disable_custom_all_reduce,
})
if self.tokenizer:
# Ping the tokenizer to ensure liveness if it runs in a
# different process.
self.tokenizer.ping()
# Create the scheduler.
# NOTE: the cache_config here has been updated with the numbers of
# GPU and CPU blocks, which are profiled in the distributed executor.
# NOTE(shengguangming): each process will have independent scheduler
self.scheduler = Scheduler(scheduler_config, cache_config, lora_config)
# Metric Logging.
if self.log_stats:
self.stat_logger = StatLogger(local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
labels=dict(model_name=model_config.served_model_name),
max_model_len=self.model_config.max_model_len)
self.stat_logger.info("cache_config", self.cache_config)
# Create sequence output processor, e.g. for beam search or
# speculative decoding.
self.output_processor = (SequenceGroupOutputProcessor.create_output_processor(
self.scheduler_config,
self.detokenizer,
self.scheduler,
self.seq_counter,
self.get_tokenizer_for_seq,
stop_checker=StopChecker(
self.scheduler_config.max_model_len,
self.get_tokenizer_for_seq,
),
))
# TODO(sgm): added for verl, but we may not need a tokenizer in Rollout
def _init_tokenizer(self, tokenizer, **tokenizer_init_kwargs):
init_kwargs = dict(enable_lora=bool(self.lora_config),
max_num_seqs=self.scheduler_config.max_num_seqs,
max_input_length=None)
init_kwargs.update(tokenizer_init_kwargs)
self.tokenizer: TokenizerGroup = TokenizerGroup(tokenizer, **init_kwargs)
def init_cache_engine(self):
# TODO: check whether we should rebuild the CUDA graph every iter when we offload/load the KV cache,
# as re-capturing the CUDA graph would be time-consuming
self.model_executor.init_cache_engine()
def free_cache_engine(self):
self.model_executor.free_cache_engine()
# NOTE(sgm): currently, we only support GPU executor
# The GPUExecutor remove the Ray dependency
@classmethod
def from_engine_args(
cls,
model,
tokenizer,
engine_args: EngineArgs,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
) -> "LLMEngine":
"""Creates an LLM engine from the engine arguments."""
# Create the engine configs.
engine_config = engine_args.create_engine_config()
# Initialize the cluster and specify the executor class.
assert engine_config.device_config.device_type == "cuda", \
"Currently, the vllm in verl only support running on GPU"
if engine_config.parallel_config.world_size == 1:
engine_config.load_config.load_format = "dummy_hf"
from .spmd_gpu_executor import SPMDGPUExecutor
executor_class = SPMDGPUExecutor
# Create the LLM engine.
engine = cls(
model,
tokenizer,
**engine_config.to_dict(),
executor_class=executor_class,
log_stats=not engine_args.disable_log_stats,
usage_context=usage_context,
)
return engine
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.model_executor.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.model_executor.offload_model_weights()
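# --- Usage sketch (illustrative; hf_config / actor / tok come from elsewhere) ---
#   engine_args = EngineArgs(model_hf_config=hf_config,
#                            tensor_parallel_size=1, dtype="bfloat16")
#   engine = LLMEngine.from_engine_args(actor, tok, engine_args)
#   engine.sync_model_weights(actor.state_dict(), load_format="hf")
#   engine.offload_model_weights()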
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/megatron_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict
import torch
import torch.nn as nn
from vllm.model_executor.layers.linear import *
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding, ParallelLMHead
from vllm.model_executor.layers.activation import ScaledActivation
from vllm.model_executor.models import ModelRegistry
# NOTE(shengguangming): replace the origin weight loader function in the class
def parallel_weight_loader(self, param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Parallel Linear weight loader."""
assert param.size() == loaded_weight.size(
), 'the parameter size does not match the loaded weight size, param size: {}, loaded_weight size: {}'.format(
param.size(), loaded_weight.size())
assert param.data.dtype == loaded_weight.data.dtype, "if we want to share weights, the data types must also match"
param.data = loaded_weight.data
def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Default weight loader."""
assert param.size() == loaded_weight.size()
assert param.data.dtype == loaded_weight.data.dtype, "if we want to share weights, the data types must also match"
param.data = loaded_weight.data
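# --- Semantics sketch (illustrative, not part of the original file) ---
# Both loaders above rebind param.data to the loaded tensor instead of
# copying into it, so the vllm parameter aliases the actor's storage:
#
#   import torch
#   param = torch.nn.Parameter(torch.zeros(2, 2))
#   loaded = torch.ones(2, 2)
#   default_weight_loader(param, loaded)
#   loaded.add_(1.0)  # mutating the source tensor in place...
#   assert param.data[0, 0].item() == 2.0  # ...is visible through the param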
def gpt2_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
# GPT-2 ties the weights of the embedding layer and the final
# linear layer.
continue
if ".attn.bias" in name or ".attn.masked_bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
if not name.startswith("transformer."):
name = "transformer." + name
param = params_dict[name]
# The HF's GPT-2 implementation uses Conv1D instead of Linear.
# Because of this, we need to transpose the weights.
# Note(zhuohan): the logic below might break quantized models.
for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
if conv1d_weight_name not in name:
continue
if not name.endswith(".weight"):
continue
# TODO: check megatron
loaded_weight = loaded_weight.t()
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_te_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv.layer_norm_weight", "input_layernorm.weight"),
("self_attention.linear_qkv.layer_norm_bias", "input_layernorm.bias"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", 'self_attn.o_proj'),
('pre_mlp_layernorm', 'post_attention_layernorm'),
('mlp.linear_fc1.layer_norm_weight', 'post_attention_layernorm.weight'),
('mlp.linear_fc1.layer_norm_bias', 'post_attention_layernorm.bias'),
('mlp.linear_fc1', 'mlp.gate_up_proj'),
('mlp.linear_fc2', 'mlp.down_proj'),
('decoder.final_layernorm', 'model.norm'),
('output_layer', 'lm_head'),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith('.bias') and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", 'self_attn.o_proj'),
(
'input_layernorm',
'input_layernorm',
),
('pre_mlp_layernorm', 'post_attention_layernorm'),
('mlp.linear_fc1', 'mlp.gate_up_proj'),
('mlp.linear_fc2', 'mlp.down_proj'),
('decoder.final_layernorm', 'model.norm'),
('output_layer', 'lm_head'),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith('.bias') and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def _replace_name(megatron_name, name_mapping):
for m_name, v_name in name_mapping:
if m_name not in megatron_name:
continue
if 'layers' in megatron_name: # deal with decoder layers
megatron_name = megatron_name.replace('decoder', 'model')
megatron_name_list = megatron_name.split('.')
if 'layer_norm_weight' in megatron_name_list or 'layer_norm_bias' in megatron_name_list:
param_name_list = megatron_name_list[:3]
param_name_list.append(v_name)
param_name = '.'.join(param_name_list)
else:
param_name_list = megatron_name_list[:3]
weight_or_bias = megatron_name_list[-1]
param_name_list.append(v_name)
param_name_list.append(weight_or_bias)
param_name = '.'.join(param_name_list)
return param_name
else:
param_name = megatron_name.replace(m_name, v_name)
return param_name
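# Doctest-style examples for _replace_name with the te mapping above
# (illustrative):
#   "decoder.layers.0.self_attention.linear_qkv.weight"
#   -> "model.layers.0.self_attn.qkv_proj.weight"
#   "decoder.layers.0.mlp.linear_fc1.layer_norm_weight"
#   -> "model.layers.0.post_attention_layernorm.weight"
#   "embedding.word_embeddings.weight"
#   -> "model.embed_tokens.weight"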
def mistral_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# TODO: need to implement a general way to deal with prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
__LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__ = {
ColumnParallelLinear: parallel_weight_loader,
MergedColumnParallelLinear: parallel_weight_loader,
QKVParallelLinear: parallel_weight_loader,
RowParallelLinear: parallel_weight_loader,
VocabParallelEmbedding: parallel_weight_loader,
ParallelLMHead: parallel_weight_loader
# "ScaledActivation.weight_loader": ScaledActivation, # TODO(shengguangming): latest commit in vllm fix awq for this function and add load_weights
# "default_weight_loader": default_weight_loader
}
# for layer_class, weight_loader in __LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__.items():
# # setattr(layer_class, 'megatron_weight_loader', weight_loader)
# layer_class.weight_loader = weight_loader
__MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__ = {
'GPT2LMHeadModel': gpt2_weight_loader,
'LlamaForCausalLM': llama_megatron_core_te_weight_loader, # use te backend for open-source megatron
'LLaMAForCausalLM': llama_megatron_core_te_weight_loader,
'MistralForCausalLM': mistral_megatron_weight_loader,
}
# the actor model is .state_dict()
# Load megatron weights
def load_megatron_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
# NOTE(sgm): to reduce peak memory usage, we offload the vllm model to CPU
# after init, so we need to move it back to GPU after syncing model weights in the first iteration.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__:
return __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
def update_megatron_weight_loader():
for layer_class, weight_loader in __LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__.items():
layer_class.weight_loader = weight_loader
VocabParallelEmbedding.__init__ = vocab_init
# FIXME(shengguangming): vLLM pads the vocab to a multiple of 64, which may cause out-of-bounds access,
# so we need to rewrite the init function of the vocab embedding
DEFAULT_VOCAB_PADDING_SIZE = 64
def vocab_init(self,
num_embeddings: int,
embedding_dim: int,
params_dtype: Optional[torch.dtype] = None,
org_num_embeddings: Optional[int] = None,
padding_size: int = DEFAULT_VOCAB_PADDING_SIZE):
super(VocabParallelEmbedding, self).__init__()
# Keep the input dimensions.
# TODO: pad to be divisible by 4
self.num_embeddings = num_embeddings
self.org_vocab_size = org_num_embeddings or num_embeddings
# self.num_embeddings_padded = pad_vocab_size(num_embeddings,
# padding_size)
self.embedding_dim = embedding_dim
if params_dtype is None:
params_dtype = torch.get_default_dtype()
self.tp_size = get_tensor_model_parallel_world_size()
# Divide the weight matrix along the vocabulary dimension.
# TODO: remove dependencies from megatron
from megatron.core.tensor_parallel.utils import VocabUtility
self.vocab_start_index, self.vocab_end_index = (VocabUtility.vocab_range_from_global_vocab_size(
self.num_embeddings, get_tensor_model_parallel_rank(), self.tp_size))
self.num_embeddings_per_partition = (self.vocab_end_index - self.vocab_start_index)
self.weight = Parameter(
torch.empty(
self.num_embeddings_per_partition,
self.embedding_dim,
# device=torch.cuda.current_device(),
dtype=params_dtype))
set_weight_attrs(self.weight, {"parallel_dim": 0, "weight_loader": self.weight_loader})
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/model_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
"""Utilities for selecting and loading models."""
from typing import Dict, Union, Optional, Iterable, Tuple
import torch
import torch.nn as nn
from transformers import PreTrainedModel
from vllm.config import (DeviceConfig, LoRAConfig, ParallelConfig, SchedulerConfig, VisionLanguageConfig)
from vllm.model_executor.model_loader import BaseModelLoader
from vllm.model_executor.model_loader.loader import _initialize_model
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from vllm.distributed.communication_op import tensor_model_parallel_all_gather
from .config import ModelConfig, LoadFormat, LoadConfig
from .megatron_weight_loaders import load_megatron_weights, update_megatron_weight_loader
from .dtensor_weight_loaders import load_dtensor_weights, update_dtensor_weight_loader
from .hf_weight_loader import update_hf_weight_loader
def get_model(actor_model: Union[PreTrainedModel, Dict], model_config: ModelConfig, load_config: LoadConfig,
device_config: DeviceConfig, parallel_config: ParallelConfig, scheduler_config: SchedulerConfig,
lora_config: Optional[LoRAConfig], vision_language_config: Optional[VisionLanguageConfig]) -> nn.Module:
loader = get_model_loader(load_config)
if load_config.load_format.startswith('dummy'):
return loader.load_model(model_config=model_config,
device_config=device_config,
lora_config=lora_config,
vision_language_config=vision_language_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config)
else:
return loader.load_model(actor_model=actor_model,
model_config=model_config,
device_config=device_config,
lora_config=lora_config,
vision_language_config=vision_language_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config)
def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
"""Get a model loader based on the load format."""
if isinstance(load_config.load_format, type):
return load_config.load_format(load_config)
if load_config.load_format == LoadFormat.AUTO:
update_megatron_weight_loader()
return MegatronLoader(load_config)
# NOTE(sgm): change the weight_loader function in runtime
if load_config.load_format == LoadFormat.MEGATRON:
update_megatron_weight_loader()
return MegatronLoader(load_config)
if load_config.load_format == LoadFormat.HF:
update_hf_weight_loader()
return HFLoader(load_config)
if load_config.load_format == LoadFormat.DTENSOR:
update_dtensor_weight_loader()
return DTensorLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_HF:
update_hf_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_MEGATRON:
update_megatron_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_DTENSOR:
update_dtensor_weight_loader()
return DummyModelLoader(load_config)
raise ValueError('load format not supported in verl: {}, only support {} and {}'.format(
load_config.load_format, LoadFormat.MEGATRON, LoadFormat.HF))
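# Illustrative dispatch (a sketch; LoadConfig is constructed elsewhere): a config
# with load_format=LoadFormat.DTENSOR first rebinds the per-layer weight_loader
# functions via update_dtensor_weight_loader() and then returns a DTensorLoader:
#   loader = get_model_loader(LoadConfig(load_format=LoadFormat.DTENSOR))
#   assert isinstance(loader, DTensorLoader)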
class DummyModelLoader(BaseModelLoader):
"""Model loader that will set model weights to random values."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def load_model(self, *, model_config: ModelConfig, device_config: DeviceConfig, lora_config: Optional[LoRAConfig],
vision_language_config: Optional[VisionLanguageConfig], parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, vision_language_config)
# NOTE(woosuk): For accurate performance evaluation, we assign
# random values to the weights.
# initialize_dummy_weights(model)
return model.eval()
class MegatronLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
# NOTE(shengguangming) Load the weights from the actor model
pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(self, actor_model: Union[PreTrainedModel,
Dict], model_config: ModelConfig, device_config: DeviceConfig,
lora_config: Optional[LoRAConfig], vision_language_config: Optional[VisionLanguageConfig],
parallel_config: ParallelConfig, scheduler_config: SchedulerConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, vision_language_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_megatron_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_megatron_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm) Some weights already point to gpu, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
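# Usage sketch (hedged; in practice ModelRunner.load_model drives this loader
# rather than user code):
#   model = MegatronLoader(load_config).load_model(
#       actor_model=megatron_actor,  # nn.Module or a name -> tensor dict
#       model_config=model_config, device_config=device_config,
#       lora_config=None, vision_language_config=None,
#       parallel_config=parallel_config, scheduler_config=scheduler_config)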
class HFLoader(BaseModelLoader):
"""Model loader that can load the model weights from model's full params."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
if isinstance(actor_model, Dict):
return actor_model.items()
elif isinstance(actor_model, nn.Module):
return dict(actor_model.named_parameters()).items()
else:
raise ValueError(f'actor model should be Dict or nn.Module, but got {type(actor_model)}')
def load_model(self, actor_model: Union[PreTrainedModel,
Dict], model_config: ModelConfig, device_config: DeviceConfig,
lora_config: Optional[LoRAConfig], vision_language_config: Optional[VisionLanguageConfig],
parallel_config: ParallelConfig, scheduler_config: SchedulerConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
# with torch.device(device_config.device):
# NOTE(sgm): init the model in cpu
model = _initialize_model(model_config, self.load_config, lora_config, vision_language_config)
model.load_weights(self._get_weights_iterator(actor_model))
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm) Some weights already point to gpu, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
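# The weights iterator accepts either input form (illustrative):
#   HFLoader(load_config)._get_weights_iterator({"lm_head.weight": tensor})  # dict -> items()
#   HFLoader(load_config)._get_weights_iterator(hf_model)  # nn.Module -> named_parameters()
# Both yield (name, tensor) pairs that model.load_weights() consumes.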
class DTensorLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
# NOTE(shengguangming) Load the weights from the actor model
pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(self, actor_model: Union[PreTrainedModel,
Dict], model_config: ModelConfig, device_config: DeviceConfig,
lora_config: Optional[LoRAConfig], vision_language_config: Optional[VisionLanguageConfig],
parallel_config: ParallelConfig, scheduler_config: SchedulerConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, vision_language_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_dtensor_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_dtensor_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm) Some weights already point to gpu, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
# FIXME(sgm): hack the _get_logits function in vllm v0.4.2
# Upstream uses ray, so the _get_logits result only needs to be returned to the
# driver node, and a gather is enough. However, we use SPMD instead of a central
# scheduler, so an all_gather is required (aligned with v0.2.6).
def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor,
embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
# Get the logits for the next tokens.
logits = torch.matmul(hidden_states, embedding.t())
if embedding_bias is not None:
logits += embedding_bias
logits = tensor_model_parallel_all_gather(logits)
# Remove paddings in vocab (if any).
if logits is not None:
logits = logits[:, :self.org_vocab_size]
return logits
from vllm.model_executor.layers.logits_processor import LogitsProcessor
LogitsProcessor._get_logits = _get_logits
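# Shape sketch for the patched _get_logits above (illustrative): each tp rank
# computes logits over its vocab shard, [num_tokens, vocab_size / tp_size];
# tensor_model_parallel_all_gather concatenates along the last dim to give
# [num_tokens, vocab_size], and the slice [:, :self.org_vocab_size] strips any
# padded columns, so every SPMD rank ends up with identical full-vocab logits.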
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/model_runner.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/model_runner.py
import torch
import torch.nn as nn
from enum import IntEnum
from typing import Dict, List, Optional, Set, Tuple, Union
from vllm.attention import (AttentionMetadata, get_attn_backend)
from vllm.config import (DeviceConfig, LoRAConfig, ParallelConfig, SchedulerConfig, VisionLanguageConfig)
from vllm.logger import init_logger
from vllm.lora.layers import LoRAMapping
from vllm.lora.request import LoRARequest
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
from vllm.model_executor import SamplingMetadata
from vllm.sequence import (MultiModalData, SamplerOutput, SequenceData, SequenceGroupMetadata)
from vllm.utils import (CudaMemoryProfiler, is_hip, is_pin_memory_available)
from vllm.worker.model_runner import ModelRunner, CUDAGraphRunner
from .model_loader import get_model
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
# How batches are constructed.
class BatchType(IntEnum):
# Every batch is prefill.
PREFILL = 0
# Every batch is decode.
DECODE = 1
# Batch is a mixture of prefill and decode.
MIXED = 2
class ModelRunner(ModelRunner):
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
kv_cache_dtype: Optional[str] = "auto",
vision_language_config: Optional[VisionLanguageConfig] = None,
):
self.model_config = model_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.lora_config = lora_config
self.load_config = load_config
# model_config can be None in tests/samplers/test_sampler.py.
# FIXME(woosuk): This is a hack to make the tests work. Refactor this.
self.sliding_window = (model_config.get_sliding_window() if model_config is not None else None)
self.device_config = (device_config if device_config is not None else DeviceConfig())
self.device = self.device_config.device
# NOTE(sgm): add for verl
self.model = model # this will be replaced by get_model()
# Set after load_model.
self.lora_manager: LRUCacheWorkerLoRAManager = None
self.graph_runners: Dict[int, CUDAGraphRunner] = {}
self.graph_memory_pool: Optional[Tuple[int, int]] = None # Set during graph capture.
self.max_seq_len_to_capture = (self.model_config.max_seq_len_to_capture if self.model_config is not None else 0)
self.pin_memory = is_pin_memory_available()
self.kv_cache_dtype = kv_cache_dtype
self.vision_language_config = vision_language_config
self.attn_backend = get_attn_backend(self.model_config.dtype if model_config is not None else None)
# Lazy initialization
self.block_size: int # Set after initial profiling.
# When using CUDA graph, the input block tables must be padded to
# max_seq_len_to_capture. However, creating the block table in
# Python can be expensive. To optimize this, we cache the block table
# in numpy and only copy the actual input content at every iteration.
# The shape of the cached block table will be
# (max batch size to capture, max context len to capture / block size).
self.graph_block_tables: torch.Tensor # Set after initial profiling.
# Set if the backend is flashinfer.
self.flashinfer_workspace_buffer: torch.Tensor
# NOTE(sgm): initialize model using the actor model
def load_model(self) -> None:
with CudaMemoryProfiler() as m:
self.model = get_model(actor_model=self.model,
model_config=self.model_config,
device_config=self.device_config,
lora_config=self.lora_config,
load_config=self.load_config,
parallel_config=self.parallel_config,
scheduler_config=self.scheduler_config,
vision_language_config=self.vision_language_config)
self.model_memory_usage = m.consumed_memory
logger.info("Loading model weights took %.4f GB", self.model_memory_usage / float(2**30))
if self.lora_config:
assert hasattr(self.model, "supported_lora_modules") and self.model.supported_lora_modules, (
"Model does not support LoRA")
assert hasattr(self.model, "embedding_modules"), "Model does not have embedding_modules"
assert hasattr(self.model, "embedding_padding_modules"), "Model does not have embedding_padding_modules"
self.lora_manager = LRUCacheWorkerLoRAManager(self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens, self.vocab_size,
self.lora_config, self.device, self.model.embedding_modules,
self.model.embedding_padding_modules)
self.model = self.lora_manager.create_lora_manager(self.model)
if self.kv_cache_dtype == "fp8" and is_hip():
# Currently scaled KV cache is only enabled on ROCm
if self.model_config.quantization_param_path is not None:
if callable(getattr(self.model, "load_kv_cache_scales", None)):
self.model.load_kv_cache_scales(self.model_config.quantization_param_path)
else:
raise RuntimeError(
"Using FP8 KV cache and scaling factors provided but "
f"model {self.model.__class__} does not support loading scaling factors.")
else:
logger.warning("Using FP8 KV cache but no scaling factors "
"provided. Defaulting to scaling factors of 1.0. "
"This may lead to less accurate results!")
elif self.model_config.quantization_param_path is not None:
logger.warning("KV cache scaling factors provided, "
"but the KV cache data type is not FP8. "
"KV cache scaling factors will not be used.")
def prepare_input_tensors(
self,
seq_group_metadata_list: List[SequenceGroupMetadata],
) -> Tuple[torch.Tensor, torch.Tensor, AttentionMetadata, SamplingMetadata, Set[LoRARequest], LoRAMapping,
torch.Tensor]:
# NOTE(sgm): all workers prepare the input in the same way
prefill_reqs = []
decode_reqs = []
for seq_group_meta in seq_group_metadata_list:
if seq_group_meta.is_prompt:
prefill_reqs.append(seq_group_meta)
else:
decode_reqs.append(seq_group_meta)
# Prepare input tensors.
(
input_tokens,
input_positions,
prefill_attn_metadata,
seq_lens,
query_lens,
lora_index_mapping,
lora_prompt_mapping,
lora_requests,
multi_modal_input,
slot_mapping,
) = self._prepare_prompt(prefill_reqs)
(
decode_input_tokens,
decode_input_positions,
decode_attn_metadata,
decode_lora_index_mapping,
decode_lora_prompt_mapping,
decode_lora_requests,
decode_slot_mapping,
) = self._prepare_decode(decode_reqs)
sampling_metadata = SamplingMetadata.prepare(seq_group_metadata_list, seq_lens, query_lens, self.device,
self.pin_memory)
if not self.scheduler_config.chunked_prefill_enabled:
assert (len(prefill_reqs) and len(decode_reqs)) == 0
num_prefills = len(seq_lens)
num_prefill_tokens = len(input_tokens)
num_decode_tokens = len(decode_input_tokens)
# Coalesce tensors. Note that attn_metadata is currently not
# coalesced for simplicity.
input_tokens.extend(decode_input_tokens)
input_positions.extend(decode_input_positions)
slot_mapping.extend(decode_slot_mapping)
lora_index_mapping.extend(decode_lora_index_mapping)
lora_prompt_mapping.extend(decode_lora_prompt_mapping)
lora_requests.update(decode_lora_requests)
input_tokens = torch.tensor(input_tokens, dtype=torch.long, device=self.device)
input_positions = torch.tensor(input_positions, dtype=torch.long, device=self.device)
slot_mapping = torch.tensor(slot_mapping, dtype=torch.long, device=self.device)
if self.lora_config:
lora_mapping = LoRAMapping(
lora_index_mapping,
lora_prompt_mapping,
)
else:
lora_mapping = None
# Broadcast the metadata.
# If batch contains both prefill and decode, it sends 2 broadcasts.
# If it only contains 1 type, it triggers a single broadcast.
if (prefill_attn_metadata is not None and decode_attn_metadata is not None):
batch_type = BatchType.MIXED
elif prefill_attn_metadata is not None:
batch_type = BatchType.PREFILL
else:
batch_type = BatchType.DECODE
attn_metadata = AttentionMetadata(
num_prefills=num_prefills,
slot_mapping=slot_mapping,
num_prefill_tokens=num_prefill_tokens,
num_decode_tokens=num_decode_tokens,
prefill_metadata=prefill_attn_metadata,
decode_metadata=decode_attn_metadata,
kv_cache_dtype=self.kv_cache_dtype,
)
return (input_tokens, input_positions, attn_metadata, sampling_metadata, lora_requests, lora_mapping,
multi_modal_input)
@torch.inference_mode()
def execute_model(
self,
seq_group_metadata_list: List[SequenceGroupMetadata],
kv_caches: List[torch.Tensor],
) -> Optional[SamplerOutput]:
(input_tokens, input_positions, attn_metadata, sampling_metadata, lora_requests, lora_mapping,
multi_modal_input) = self.prepare_input_tensors(seq_group_metadata_list)
if self.lora_config:
self.set_active_loras(lora_requests, lora_mapping)
# Currently cuda graph is only supported by the decode phase.
prefill_meta = attn_metadata.prefill_metadata
decode_meta = attn_metadata.decode_metadata
if prefill_meta is None and decode_meta.use_cuda_graph:
graph_batch_size = input_tokens.shape[0]
model_executable = self.graph_runners[graph_batch_size]
else:
model_executable = self.model
execute_model_kwargs = {
"input_ids": input_tokens,
"positions": input_positions,
"kv_caches": kv_caches,
"attn_metadata": attn_metadata,
}
if self.vision_language_config:
execute_model_kwargs.update({"image_input": multi_modal_input})
hidden_states = model_executable(**execute_model_kwargs)
# Compute the logits.
logits = self.model.compute_logits(hidden_states, sampling_metadata)
# Only perform sampling in the driver worker.
# if not self.is_driver_worker:
# return None
# TODO(sgm): perform sampling on rank 0
# Sample the next token.
output = self.model.sample(
logits=logits,
sampling_metadata=sampling_metadata,
)
return output
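# Flow sketch for one step (mirrors the code above): prepare_input_tensors ->
# model_executable(...) -> compute_logits -> sample. Because every SPMD rank
# prepares identical inputs (see the NOTE in prepare_input_tensors), each rank
# runs sampling and returns an output, instead of only a ray driver worker.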
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/parallel_state.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Model and data parallel groups."""
import os
import torch
import torch.distributed
from typing import Optional
import vllm.distributed.parallel_state as ps
import vllm.envs as envs
from vllm.logger import init_logger
from torch.distributed.device_mesh import init_device_mesh
logger = init_logger(__name__)
"""
This version is strongly tied with Megatron to implement HybridEngine and weight sharing between vllm and Megatron.
- We assume the Megatron tp+dp+pp world is already established before calling this function.
"""
# Device mesh for using DTensor
_DEVICE_MESH = None
# Tensor model parallel group that the current rank belongs to.
_TP_DEVICE_GROUP = None
_TP_CPU_GROUP = None
# This method is for initializing the ParallelGroup when using HybridEngine
def initialize_parallel_state(
distributed_init_method: str = "env://",
backend: str = "nccl",
tensor_model_parallel_size: int = 1,
num_tp_per_train_tp: int = 1,
pipeline_model_parallel_size: int = 1,
):
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN.
rank = int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
ps.init_distributed_environment(world_size, rank, distributed_init_method, local_rank, backend)
if torch.distributed.get_world_size() > 1:
# NOTE: build a separate inference group with infer tp & micro dp
initialize_model_parallel_for_vllm(tensor_model_parallel_size=tensor_model_parallel_size,
num_tensor_model_parallel_groups_per_train_tp=num_tp_per_train_tp)
else:
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
def ensure_model_parallel_initialized(
tensor_model_parallel_size: int,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""Helper to initialize model parallel groups if they are not initialized,
or ensure tensor-parallel and pipeline-parallel sizes are equal to expected
values if the model parallel groups are initialized.
"""
# get the backend of _DEVICE_WORLD_GROUP
backend = backend or torch.distributed.get_backend()
if not model_parallel_is_initialized():
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
return
assert (get_tensor_model_parallel_world_size() == tensor_model_parallel_size), (
"tensor parallel group already initialized, but of unexpected size: "
f"{get_tensor_model_parallel_world_size()=} vs. "
f"{tensor_model_parallel_size=}")
# assert (get_pipeline_model_parallel_world_size(
# ) == pipeline_model_parallel_size), (
# "pipeline parallel group already initialized, but of unexpected size: "
# f"{get_pipeline_model_parallel_world_size()=} vs. "
# f"{pipeline_model_parallel_size=}")
def model_parallel_is_initialized():
"""Check if tensor and pipeline parallel groups are initialized."""
return (ps._TP_DEVICE_GROUP is not None)
# and _PIPELINE_MODEL_PARALLEL_GROUP is not None)
def initialize_model_parallel_for_vllm(tensor_model_parallel_size: int,
num_tensor_model_parallel_groups_per_train_tp: int = 1) -> None:
from torch.distributed import new_group
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
assert isinstance(tensor_model_parallel_size, int)
# assert num_tensor_model_parallel_groups_per_train_tp == 1 and not different_tp_group
# assert num_tensor_model_parallel_groups_per_train_tp > 1 and different_tp_group
# Build the tensor model-parallel groups.
assert ps._TP_DEVICE_GROUP is None, ("tensor model parallel group is already initialized")
global _TP_DEVICE_GROUP
global _TP_CPU_GROUP
global _DEVICE_MESH
world_size: int = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
backend = torch.distributed.get_backend()
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
if num_tensor_model_parallel_groups_per_train_tp == 1:
# if tensor_model_parallel_size == train_tensor_parallel_size:
# using the same tp group as Megatron/vllm
for i in range(num_tensor_model_parallel_groups):
ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
group = torch.distributed.new_group(ranks, backend=backend)
cpu_group = torch.distributed.new_group(ranks, backend="gloo")
if rank in ranks:
_TP_DEVICE_GROUP = group
_TP_CPU_GROUP = cpu_group
ps._TP_DEVICE_GROUP = group
ps._TP_CPU_GROUP = cpu_group
# no _MICRO_DATA_PARALLEL_GROUP
else:
# initialize a micro_dp group and a tp group
# assume training tp=4, infer tp=2, then, weight is partitioned as
# [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference
# Build the inference tp groups
# train_tp = train_tensor_parallel_size
train_tp = num_tensor_model_parallel_groups_per_train_tp * tensor_model_parallel_size
# num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size
assert _TP_DEVICE_GROUP is None, ("tensor model parallel group is already initialized")
for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp):
start = train_tp * i
end = train_tp * (i + 1)
for j in range(num_tensor_model_parallel_groups_per_train_tp):
ranks = list(range(start, end, num_tensor_model_parallel_groups_per_train_tp))
for i in range(len(ranks)):
ranks[i] += j
group = torch.distributed.new_group(ranks)
cpu_group = torch.distributed.new_group(ranks, backend='gloo')
if rank in ranks:
_TP_DEVICE_GROUP = group
_TP_CPU_GROUP = cpu_group
ps._TP_DEVICE_GROUP = _TP_DEVICE_GROUP
ps._TP_CPU_GROUP = cpu_group
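# Worked example for this branch (illustrative): with world_size=8,
# tensor_model_parallel_size=2 and num_tensor_model_parallel_groups_per_train_tp=2
# (i.e. train tp=4), the inference tp groups come out as [0,2], [1,3], [4,6],
# [5,7]: each inference group strides within one training tp group, matching
# the [1,2], [1,2], [3,4], [3,4] weight layout sketched above.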
# Build the pipeline model-parallel groups.
# global _PIPELINE_MODEL_PARALLEL_GROUP
# global _PIPELINE_GLOBAL_RANKS
# assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized")
# ps._PIPELINE_MODEL_PARALLEL_GROUP = mpu.get_pipeline_model_parallel_group()
# ps._PIPELINE_GLOBAL_RANKS = mpu.get_pipeline_model_parallel_ranks()
def initialize_model_parallel(
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""
NOTE: This method is a hack of the open-sourced version, without the
assertion that world_size == tp * pp.
Initialize model parallel groups.
Arguments:
tensor_model_parallel_size: number of GPUs used for tensor model
parallelism.
pipeline_model_parallel_size: number of GPUs used for pipeline model
parallelism.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
4 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 pipeline model-parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size: int = torch.distributed.get_world_size()
# get the backend of _DEVICE_WORLD_GROUP
backend = backend or torch.distributed.get_backend()
# NOTE(sgm) we don't assert world_size == tp * pp
# DP is not managed by vllm but by the verl WorkerGroup
num_tensor_model_parallel_groups: int = (world_size // tensor_model_parallel_size)
num_pipeline_model_parallel_groups: int = (world_size // pipeline_model_parallel_size)
rank = torch.distributed.get_rank()
# Build device mesh for TP
if num_tensor_model_parallel_groups > 1:
device_mesh = init_device_mesh("cuda", (num_tensor_model_parallel_groups, tensor_model_parallel_size),
mesh_dim_names=("replicate", "tp_shard"))
else:
device_mesh = init_device_mesh("cuda", (tensor_model_parallel_size,), mesh_dim_names=["tp_shard"])
shard_group = device_mesh.get_group(mesh_dim="tp_shard")
# Build the tensor model-parallel groups.
global _TP_DEVICE_GROUP, _TP_CPU_GROUP
global _DEVICE_MESH
assert _TP_DEVICE_GROUP is None, ("tensor model parallel group is already initialized")
assert _DEVICE_MESH is None, ("device mesh in vllm is already initialized")
_DEVICE_MESH = device_mesh
# for i in range(num_tensor_model_parallel_groups):
# ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
# group = torch.distributed.new_group(ranks, backend=backend)
# cpu_group = torch.distributed.new_group(ranks, backend="gloo")
# assert torch.distributed.get_process_group_ranks(shard_group) == torch.distributed.get_process_group_ranks(cpu_group)
# ranks = torch.distributed.get_process_group_ranks(shard_group)
# cpu_group = torch.distributed.new_group(ranks, backend="gloo") # TODO: this will hang
# cpu_group = torch.distributed.new_group(, backend="gloo")
# if rank == 0:
# print(f'rank: {rank}')
# print(f'ranks: {ranks}')
# print(f'torch.distributed.get_process_group_ranks(shard_group): {torch.distributed.get_process_group_ranks(shard_group)}')
# if rank in ranks:
_TP_DEVICE_GROUP = shard_group
ps._TP_DEVICE_GROUP = _TP_DEVICE_GROUP
# ps._TP_CPU_GROUP = cpu_group # TODO: will hang when used with device mesh
# TODO: init using device mesh
# Build the pipeline model-parallel groups.
assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized")
for i in range(num_pipeline_model_parallel_groups):
ranks = range(i, world_size, num_pipeline_model_parallel_groups)
group = torch.distributed.new_group(ranks, backend=backend)
if rank in ranks:
ps._PIPELINE_MODEL_PARALLEL_GROUP = group
ps._PIPELINE_GLOBAL_RANKS = ranks
"""
Device mesh utilities
"""
def get_device_mesh():
assert _DEVICE_MESH is not None, ("device mesh is not initialized")
return _DEVICE_MESH
"""
Tensor model parallel utilities
"""
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert _TP_DEVICE_GROUP is not None, ("tensor model parallel group is not initialized")
return _TP_DEVICE_GROUP
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
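# Worked example (illustrative): for global rank 5 with a tensor parallel world
# size of 2, (5 // 2) * 2 == 4, so rank 4 is the first rank of the tp group
# that rank 5 belongs to.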
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/spmd_gpu_executor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/executor/gpu_executor.py
import os
import socket
from typing import Any, Dict, List, Optional, Set, Tuple
import torch
import vllm.envs as envs
from vllm.executor.executor_base import ExecutorBase, ExecutorAsyncBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import SamplerOutput, ExecuteModelRequest
from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, ParallelConfig, SchedulerConfig, SpeculativeConfig,
VisionLanguageConfig)
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
class SPMDGPUExecutor(ExecutorBase):
"""SPMD-based multi-GPU executor implementations."""
def __init__(
self,
model, # pytorch model itself or its parameter dict
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
vision_language_config: Optional[VisionLanguageConfig],
speculative_config: Optional[SpeculativeConfig],
) -> None:
self.model_config = model_config
self.cache_config = cache_config
self.lora_config = lora_config
self.load_config = load_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.vision_language_config = vision_language_config
self.speculative_config = speculative_config
distributed_init_method = initialize_cluster(parallel_config)
self._init_executor(model, distributed_init_method)
# TODO(sgm): verl does not support speculative decoding yet
def _init_executor(self, model, distributed_init_method) -> None:
assert (not self.speculative_config), "Speculative decoding not yet supported for multi-GPU backend."
# Create the parallel worker for each GPU.
self._init_workers_sp(model, distributed_init_method)
def _init_workers_sp(self, model, distributed_init_method: str):
# Lazy import the Worker to avoid importing torch.cuda/xformers
# before CUDA_VISIBLE_DEVICES is set in the Worker
from .worker import Worker # pylint: disable=import-outside-toplevel
rank = int(os.getenv("RANK"))
local_rank = int(os.getenv("LOCAL_RANK"))
print(f'local rank {local_rank}')
self.worker = Worker(
model,
self.model_config,
self.parallel_config,
self.scheduler_config,
self.device_config,
self.cache_config,
self.load_config,
local_rank,
rank,
distributed_init_method,
lora_config=self.lora_config,
vision_language_config=self.vision_language_config,
)
# NOTE(shengguangming): torch.distributed.init_process_group will be called inside init_device()
self.worker.init_device()
self.worker.load_model()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Determine the number of available KV blocks.
This invokes `determine_num_available_blocks` on each worker and takes
the min of the results, guaranteeing that the selected cache sizes are
compatible with all workers.
Returns:
- tuple[num_gpu_blocks, num_cpu_blocks]
"""
# Get the maximum number of blocks that can be allocated on GPU and CPU.
num_blocks = self.worker.determine_num_available_blocks()
# NOTE(shengguangming): Now we don't use a shared centralized controller; each process
# has its own scheduler
num_gpu_blocks = num_blocks[0]
num_cpu_blocks = num_blocks[1]
return num_gpu_blocks, num_cpu_blocks
def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None:
"""Initialize the KV cache in all workers.
"""
# NOTE: We log here to avoid multiple logs when number of workers is
# greater than one. We could log in the engine, but not all executors
# have GPUs.
logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks, num_cpu_blocks)
self.cache_config.num_gpu_blocks = num_gpu_blocks
self.cache_config.num_cpu_blocks = num_cpu_blocks
if torch.distributed.get_rank() == 0:
print(
f'before init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB'
)
self.worker.initialize_cache(num_gpu_blocks=num_gpu_blocks, num_cpu_blocks=num_cpu_blocks)
if torch.distributed.get_rank() == 0:
print(
f'after init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB'
)
# NOTE(sgm): This will not profile & capture the model(CUDAGraph) when rebuilding KVCache
def init_cache_engine(self) -> None:
self.worker._init_cache_engine()
def free_cache_engine(self) -> None:
self.worker.free_cache_engine()
def execute_model(self, execute_model_req) -> List[SamplerOutput]:
all_outputs = self.worker.execute_model(execute_model_req=execute_model_req)
# NOTE(sgm):
# Each GPU in vllm under verl has its own spmd_gpu_executor, therefore all GPUs should return the outputs
# In vllm with ray, only the driver worker returns the sampling results.
return all_outputs
def add_lora(self, lora_request: LoRARequest) -> bool:
assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
return self.worker.add_lora(lora_request=lora_request)
def remove_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.remove_lora(lora_id=lora_id)
def list_loras(self) -> Set[int]:
return self.worker.list_loras()
def check_health(self) -> None:
# SPMDExecutor will always be healthy as long as
# it's running.
return
# NOTE(sgm): add for verl
def offload_model_weights(self) -> None:
self.worker.offload_model_weights()
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.worker.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def initialize_cluster(
parallel_config: ParallelConfig,
engine_use_ray: bool = False,
ray_address: Optional[str] = None,
) -> str:
"""Initialize the distributed cluster locally.
Args:
parallel_config: The configurations for parallel execution.
Returns:
The `distributed_init_method` is the address for initializing the
distributed backend.
"""
# Initialize cluster locally.
port = get_open_port()
# We need to setup the distributed init method to make sure
# the distributed megatron code (e.g., get world size) works correctly.
# distributed_init_method = f"tcp://localhost:{port}"
distributed_init_method = 'env://'
return distributed_init_method
def get_open_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return s.getsockname()[1]
# TODO(sgm): not implemented async executor yet
class SPMDGPUExecutorAsync(SPMDGPUExecutor, ExecutorAsyncBase):
async def execute_model_async(self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
"""Executes one model step on the given sequences."""
raise NotImplementedError
async def check_health_async(self) -> None:
"""Checks if the executor is healthy. If not, it should raise an
exception."""
self.check_health()
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/transformers_utils/tokenizer_group/tokenizer_group.py
from typing import List, Optional, Tuple, Union
from transformers import (AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast)
from vllm.lora.request import LoRARequest
from vllm.utils import make_async, LRUCache
from vllm.transformers_utils.tokenizers import *
class TokenizerGroup:
"""A group of tokenizers that can be used for LoRA adapters."""
def __init__(self, tokenizer: PreTrainedTokenizer, enable_lora: bool, max_num_seqs: int,
max_input_length: Optional[int]):
self.enable_lora = enable_lora
self.max_input_length = max_input_length
self.tokenizer = tokenizer
self.lora_tokenizers = LRUCache[PreTrainedTokenizer](capacity=max_num_seqs) if enable_lora else None
def ping(self) -> bool:
"""Check if the tokenizer group is alive."""
return True
def get_max_input_len(self, lora_request: Optional[LoRARequest] = None) -> Optional[int]:
"""Get the maximum input length for the LoRA request."""
return self.max_input_length
def encode(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = self.get_lora_tokenizer(lora_request)
return tokenizer.encode(prompt)
async def encode_async(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = await self.get_lora_tokenizer_async(lora_request)
return tokenizer.encode(prompt)
def get_lora_tokenizer(self, lora_request: Optional[LoRARequest]) -> "PreTrainedTokenizer":
if not lora_request or not self.enable_lora:
return self.tokenizer
if lora_request.lora_int_id not in self.lora_tokenizers:
# TODO(sgm): the lora request may carry its own tokenizer, which may differ; for now we reuse the base tokenizer
tokenizer = self.tokenizer
# tokenizer = (get_lora_tokenizer(
# lora_request, **self.tokenizer_config) or self.tokenizer)
self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer)
return tokenizer
else:
return self.lora_tokenizers.get(lora_request.lora_int_id)
# FIXME(sgm): for simplicity, we expose the special tokens here
@property
def pad_token_id(self):
return self.tokenizer.pad_token_id
@property
def eos_token_id(self):
return self.tokenizer.eos_token_id
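# Minimal usage sketch (hedged; `tok` is any HF tokenizer instance):
#   group = TokenizerGroup(tok, enable_lora=False, max_num_seqs=256,
#                          max_input_length=None)
#   ids = group.encode("hello world")  # delegates to tok.encode
#   group.pad_token_id, group.eos_token_id  # forwarded special tokens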
================================================
FILE: verl/third_party/vllm/vllm_v_0_4_2/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/worker.py
"""A GPU worker class."""
import os
import gc
from typing import Dict, List, Tuple, Optional, Union
import torch
import torch.distributed
import torch.nn as nn
from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, ParallelConfig, SchedulerConfig, VisionLanguageConfig)
from vllm.model_executor import set_random_seed
from vllm.sequence import SamplerOutput, ExecuteModelRequest
from vllm.worker.cache_engine import CacheEngine
from vllm.distributed.device_communicators import pynccl_utils
from vllm.distributed.device_communicators.custom_all_reduce import (init_custom_ar)
# TODO(sgm): check why vllm has a similar file in vllm.model_executor.parallel_utils.parallel_state
from vllm.distributed import get_tensor_model_parallel_cpu_group, init_distributed_environment, get_tensor_model_parallel_group
from vllm.worker.worker import Worker, _check_if_gpu_supports_dtype
from .model_runner import ModelRunner
from .megatron_weight_loaders import load_megatron_weights
from .hf_weight_loader import load_hf_weights
from .dtensor_weight_loaders import load_dtensor_weights
from .parallel_state import (ensure_model_parallel_initialized)
from .config import ModelConfig, LoadConfig, LoadFormat
class Worker(Worker):
"""A worker class that executes (a partition of) the model on a GPU.
Each worker is associated with a single GPU. The worker is responsible for
maintaining the KV cache and executing the model on the GPU. In case of
distributed inference, each worker is assigned a partition of the model.
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
cache_config: CacheConfig,
load_config: LoadConfig,
local_rank: int,
rank: int,
distributed_init_method: str,
lora_config: Optional[LoRAConfig] = None,
vision_language_config: Optional[VisionLanguageConfig] = None,
is_driver_worker: bool = False,
) -> None:
# self.model = model # will be replaced in the init_model
self.model_config = model_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.cache_config = cache_config
self.local_rank = local_rank
self.rank = rank
self.distributed_init_method = distributed_init_method
self.lora_config = lora_config
self.load_config = load_config
self.is_driver_worker = is_driver_worker
if self.is_driver_worker:
assert self.rank == 0, "The driver worker must have rank 0."
self.vision_language_config = vision_language_config
if self.vision_language_config:
assert not self.lora_config, ("To be tested: vision language model with LoRA settings.")
self.model_runner = ModelRunner(
model,
model_config,
parallel_config,
scheduler_config,
device_config,
load_config=load_config,
lora_config=self.lora_config,
kv_cache_dtype=self.cache_config.cache_dtype,
vision_language_config=vision_language_config,
)
# Uninitialized cache engine. Will be initialized by
# init_cache_engine.
self.cache_engine: CacheEngine = None
self.gpu_cache: List[torch.Tensor] = None
# NOTE(sgm): For offloading inference engine params
self.cpu_model = None
def init_device(self) -> None:
if self.device_config.device.type == "cuda":
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN.
self.rank = self.rank if self.rank is not None else int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
self.device = torch.device(f"cuda:{local_rank}")
if self.rank < 0:
raise ValueError("Invalid or unspecified rank.")
torch.cuda.set_device(self.device)
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
self.parallel_config.world_size = world_size
_check_if_gpu_supports_dtype(self.model_config.dtype)
torch.cuda.empty_cache()
self.init_gpu_memory = torch.cuda.mem_get_info()[0]
else:
raise RuntimeError(f"Not support device type: {self.device_config.device}")
# Initialize the distributed environment.
init_worker_distributed_environment(self.parallel_config, self.rank, self.distributed_init_method,
self.local_rank)
# Set random seed.
set_random_seed(self.model_config.seed)
# self.model = get_model(actor_model=self.model, model_config=self.model_config)
@torch.inference_mode()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Profiles the peak memory usage of the model to determine how many
KV blocks may be allocated without OOMs.
The engine will first conduct a profiling of the existing memory usage.
Then, it calculates the maximum possible number of GPU and CPU blocks
that can be allocated with the remaining free memory.
.. tip::
You may limit the usage of GPU memory
by adjusting the `gpu_memory_utilization` parameter.
"""
# Profile the memory usage of the model and get the maximum number of
# cache blocks that can be allocated with the remaining free memory.
torch.cuda.empty_cache()
# torch.cuda.reset_peak_memory_stats()
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
self.model_runner.profile_run()
# Calculate the number of blocks that can be allocated with the
# profiled peak memory.
torch.cuda.synchronize()
free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
peak_memory = total_gpu_memory - free_gpu_memory
assert peak_memory > 0, ("Error in memory profiling. This happens when the GPU memory was "
"not properly cleaned up before initializing the vLLM instance.")
cache_block_size = self.get_cache_block_size_bytes()
# NOTE(sgm) use the remaining memory
num_gpu_blocks = int((free_gpu_memory * self.cache_config.gpu_memory_utilization) // cache_block_size)
# num_gpu_blocks = int((total_gpu_memory * self.cache_config.gpu_memory_utilization - peak_memory) // cache_block_size)
num_cpu_blocks = int(self.cache_config.swap_space_bytes // cache_block_size)
num_gpu_blocks = max(num_gpu_blocks, 0)
num_cpu_blocks = max(num_cpu_blocks, 0)
if self.model_runner.lora_manager:
self.model_runner.remove_all_loras()
# NOTE(sgm): Add for verl, synchronize number of blocks with all the rank
num_gpu_blocks = torch.tensor([num_gpu_blocks], device='cuda')
num_cpu_blocks = torch.tensor([num_cpu_blocks], device='cuda')
torch.distributed.all_reduce(num_gpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group())
torch.distributed.all_reduce(num_cpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group())
num_gpu_blocks = num_gpu_blocks.item()
num_cpu_blocks = num_cpu_blocks.item()
gc.collect()
torch.cuda.empty_cache()
return num_gpu_blocks, num_cpu_blocks
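# Worked example for the block math above (illustrative numbers): with 40 GB
# free after profiling, gpu_memory_utilization=0.6, and a 2 MB cache block,
# num_gpu_blocks = int(40e9 * 0.6 // 2e6) = 12000, before the all_reduce MIN
# aligns the count across all tensor-parallel ranks.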
def _init_cache_engine(self):
if self.cache_engine is None and self.gpu_cache is None:
super()._init_cache_engine()
def free_cache_engine(self):
# ensure `enforce_eager=True`
self.cache_engine = None
self.gpu_cache = None
@torch.inference_mode()
def execute_model(self, execute_model_req: Optional[ExecuteModelRequest] = None) -> List[SamplerOutput]:
if execute_model_req is None:
seq_group_metadata_list = None
else:
seq_group_metadata_list = execute_model_req.seq_group_metadata_list
# NOTE(sgm): each SPMD rank will have identical input
assert seq_group_metadata_list is not None
assert execute_model_req is not None
num_seq_groups = len(seq_group_metadata_list)
blocks_to_swap_in = execute_model_req.blocks_to_swap_in
blocks_to_swap_out = execute_model_req.blocks_to_swap_out
blocks_to_copy = execute_model_req.blocks_to_copy
self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)
# If there is no input, we don't need to execute the model.
if num_seq_groups == 0:
return []
output = self.model_runner.execute_model(seq_group_metadata_list, self.gpu_cache)
# Worker only supports single-step execution. Wrap the output in a list
# to conform to interface.
return [output]
# assume the input is .state_dict()
def sync_model_weights(self, actor_weights: Dict, load_format: str):
if load_format in [LoadFormat.MEGATRON, LoadFormat.AUTO]:
load_megatron_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.HF:
# full model state dict without sharding
load_hf_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.DTENSOR:
load_dtensor_weights(actor_weights, self.model_runner.model)
def offload_model_weights(self) -> None:
if self.cpu_model is None:
self.cpu_model = {}
for name, params in self.model_runner.model.named_parameters():
self.cpu_model[name] = torch.empty_like(params, device='cpu')
params.data = self.cpu_model[name]
else:
for name, params in self.model_runner.model.named_parameters():
params.data = self.cpu_model[name]
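# Pairing sketch (hedged): offload_model_weights() discards the GPU weights by
# repointing every parameter at a persistent CPU buffer (allocated once via
# torch.empty_like, so its contents are uninitialized), and the next
# sync_model_weights(...) call restores real values from the actor; the two
# calls bracket each generation phase without reallocating CPU memory.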
def init_worker_distributed_environment(
parallel_config: ParallelConfig,
rank: int,
distributed_init_method: Optional[str] = "env://",
local_rank: int = -1,
) -> None:
"""Initialize the distributed environment."""
# NOTE(sgm) using tcp://localhost:xxxx will hang in the HF setting without megatron
init_distributed_environment(parallel_config.world_size, rank, distributed_init_method, local_rank)
ensure_model_parallel_initialized(tensor_model_parallel_size=parallel_config.tensor_parallel_size,
pipeline_model_parallel_size=parallel_config.pipeline_parallel_size)
# TODO(sgm): check whether need this
# if pynccl_utils.is_initialized():
# pynccl_world_size = pynccl_utils.get_world_size()
# if pynccl_world_size != parallel_config.world_size:
# raise RuntimeError(
# "pynccl is already initialized but the pynccl world "
# "size does not match parallel_config.world_size "
# f"({pynccl_world_size} vs. {parallel_config.world_size}).")
# elif parallel_config.world_size > 1:
# # NOTE(woosuk): We don't initialize pynccl process group when world size
# # is 1.
# # NOTE(kaichao): By default, pynccl is initialized for tp group.
# pynccl_utils.init_process_group(
# group=get_tensor_model_parallel_cpu_group())
# # Initialize a custom fast all-reduce implementation.
# if not parallel_config.disable_custom_all_reduce:
# init_custom_ar()
# A small all_reduce for warmup.
torch.distributed.all_reduce(torch.zeros(1).cuda())
# if pynccl_utils.is_initialized():
# pynccl_utils.all_reduce(torch.zeros(1).cuda())
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/arg_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/arg_utils.py
import os
import argparse
import dataclasses
import json
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional, Tuple, Type, Union
import torch.nn as nn
from transformers import PretrainedConfig
from .config import ModelConfig, LoadConfig
from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, EngineConfig, LoRAConfig, MultiModalConfig,
ObservabilityConfig, ParallelConfig, PromptAdapterConfig, SchedulerConfig, SpeculativeConfig,
TokenizerPoolConfig)
from vllm.executor.executor_base import ExecutorBase
from vllm.logger import init_logger
from vllm.utils import FlexibleArgumentParser
from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS
from vllm.utils import str_to_int_tuple
if TYPE_CHECKING:
from vllm.transformers_utils.tokenizer_group.base_tokenizer_group import (BaseTokenizerGroup)
logger = init_logger(__name__)
def nullable_str(val: str):
if not val or val == "None":
return None
return val
@dataclass
class EngineArgs:
"""Arguments for vLLM engine."""
model_hf_config: PretrainedConfig = None # for verl
served_model_name = None # TODO(sgm): check this
# tokenizer: Optional[str] = None # TODO(sgm): check this
skip_tokenizer_init: bool = False
tokenizer_mode: str = 'auto'
trust_remote_code: bool = False
download_dir: Optional[str] = None
load_format: str = 'auto'
dtype: str = 'auto'
kv_cache_dtype: str = 'auto'
quantization_param_path: Optional[str] = None
seed: int = 0
max_model_len: Optional[int] = None
worker_use_ray: bool = False
# Note: Specifying a custom executor backend by passing a class
# is intended for expert use only. The API may change without
# notice.
distributed_executor_backend: Optional[Union[str, Type[ExecutorBase]]] = None
pipeline_parallel_size: int = 1
tensor_parallel_size: int = 1
max_parallel_loading_workers: Optional[int] = None
block_size: int = 16
enable_prefix_caching: bool = False
disable_sliding_window: bool = False
use_v2_block_manager: bool = False
swap_space: int = 4 # GiB
cpu_offload_gb: int = 0 # GiB
gpu_memory_utilization: float = 0.90
max_num_batched_tokens: Optional[int] = None
max_num_seqs: int = 256
max_logprobs: int = 20 # Default value for OpenAI Chat Completions API
disable_log_stats: bool = False
revision: Optional[str] = None
code_revision: Optional[str] = None
rope_scaling: Optional[dict] = None
rope_theta: Optional[float] = None
tokenizer_revision: Optional[str] = None
quantization: Optional[str] = None
enforce_eager: bool = False
max_context_len_to_capture: Optional[int] = None
max_seq_len_to_capture: int = 8192
disable_custom_all_reduce: bool = False
tokenizer_pool_size: int = 0
# Note: Specifying a tokenizer pool by passing a class
# is intended for expert use only. The API may change without
# notice.
tokenizer_pool_type: Union[str, Type["BaseTokenizerGroup"]] = "ray"
tokenizer_pool_extra_config: Optional[dict] = None
enable_lora: bool = False
max_loras: int = 1
max_lora_rank: int = 16
enable_prompt_adapter: bool = False
max_prompt_adapters: int = 1
max_prompt_adapter_token: int = 0
fully_sharded_loras: bool = False
lora_extra_vocab_size: int = 256
long_lora_scaling_factors: Optional[Tuple[float]] = None
lora_dtype: str = 'auto'
max_cpu_loras: Optional[int] = None
device: str = 'auto'
ray_workers_use_nsight: bool = False
num_gpu_blocks_override: Optional[int] = None
num_lookahead_slots: int = 0
model_loader_extra_config: Optional[dict] = None
ignore_patterns: Optional[Union[str, List[str]]] = None
preemption_mode: Optional[str] = None
scheduler_delay_factor: float = 0.0
enable_chunked_prefill: Optional[bool] = None
guided_decoding_backend: str = 'outlines'
# Speculative decoding configuration.
speculative_model: Optional[str] = None
speculative_draft_tensor_parallel_size: Optional[int] = None
num_speculative_tokens: Optional[int] = None
speculative_max_model_len: Optional[int] = None
speculative_disable_by_batch_size: Optional[int] = None
ngram_prompt_lookup_max: Optional[int] = None
ngram_prompt_lookup_min: Optional[int] = None
spec_decoding_acceptance_method: str = 'rejection_sampler'
typical_acceptance_sampler_posterior_threshold: Optional[float] = None
typical_acceptance_sampler_posterior_alpha: Optional[float] = None
qlora_adapter_name_or_path: Optional[str] = None
disable_logprobs_during_spec_decoding: Optional[bool] = None
otlp_traces_endpoint: Optional[str] = None
@staticmethod
def add_cli_args(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Shared CLI arguments for vLLM engine."""
# Model arguments
# TODO(shengguangming): delete the unused args
parser.add_argument('--model',
type=str,
default='facebook/opt-125m',
help='name or path of the huggingface model to use')
        parser.add_argument('--tokenizer',
                            type=str,
                            default=None,  # EngineArgs has no `tokenizer` field; see the commented-out field above
                            help='name or path of the huggingface tokenizer to use')
parser.add_argument('--revision',
type=str,
default=None,
help='the specific model version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
parser.add_argument('--tokenizer-revision',
type=str,
default=None,
help='the specific tokenizer version to use. It can be a branch '
'name, a tag name, or a commit id. If unspecified, will use '
'the default version.')
parser.add_argument('--tokenizer-mode',
type=str,
default=EngineArgs.tokenizer_mode,
choices=['auto', 'slow'],
help='tokenizer mode. "auto" will use the fast '
'tokenizer if available, and "slow" will '
'always use the slow tokenizer.')
parser.add_argument('--trust-remote-code', action='store_true', help='trust remote code from huggingface')
parser.add_argument('--download-dir',
type=str,
default=EngineArgs.download_dir,
help='directory to download and load the weights, '
'default to the default cache dir of '
'huggingface')
parser.add_argument('--load-format',
type=str,
default=EngineArgs.load_format,
choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
help='The format of the model weights to load. '
'"auto" will try to load the weights in the safetensors format '
'and fall back to the pytorch bin format if safetensors format '
'is not available. '
'"pt" will load the weights in the pytorch bin format. '
'"safetensors" will load the weights in the safetensors format. '
'"npcache" will load the weights in pytorch format and store '
'a numpy cache to speed up the loading. '
'"dummy" will initialize the weights with random values, '
'which is mainly for profiling.')
parser.add_argument('--dtype',
type=str,
default=EngineArgs.dtype,
choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],
help='data type for model weights and activations. '
'The "auto" option will use FP16 precision '
'for FP32 and FP16 models, and BF16 precision '
'for BF16 models.')
parser.add_argument('--max-model-len',
type=int,
default=None,
help='model context length. If unspecified, '
'will be automatically derived from the model.')
# Parallel arguments
parser.add_argument('--worker-use-ray',
action='store_true',
help='use Ray for distributed serving, will be '
'automatically set when using more than 1 GPU')
parser.add_argument('--pipeline-parallel-size',
'-pp',
type=int,
default=EngineArgs.pipeline_parallel_size,
help='number of pipeline stages')
parser.add_argument('--tensor-parallel-size',
'-tp',
type=int,
default=EngineArgs.tensor_parallel_size,
help='number of tensor parallel replicas')
# KV cache arguments
parser.add_argument('--block-size',
type=int,
default=EngineArgs.block_size,
choices=[8, 16, 32],
help='token block size')
# TODO(woosuk): Support fine-grained seeds (e.g., seed per request).
parser.add_argument('--seed', type=int, default=EngineArgs.seed, help='random seed')
parser.add_argument('--swap-space',
type=int,
default=EngineArgs.swap_space,
help='CPU swap space size (GiB) per GPU')
parser.add_argument('--gpu-memory-utilization',
type=float,
default=EngineArgs.gpu_memory_utilization,
                            help='the fraction of GPU memory to be used for '
                            'the model executor')
parser.add_argument('--max-num-batched-tokens',
type=int,
default=EngineArgs.max_num_batched_tokens,
help='maximum number of batched tokens per '
'iteration')
parser.add_argument('--max-num-seqs',
type=int,
default=EngineArgs.max_num_seqs,
help='maximum number of sequences per iteration')
parser.add_argument('--disable-log-stats', action='store_true', help='disable logging statistics')
# Quantization settings.
parser.add_argument('--quantization',
'-q',
type=str,
choices=['awq', None],
default=None,
help='Method used to quantize the weights')
return parser
@classmethod
def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
# Get the list of attributes of this dataclass.
attrs = [attr.name for attr in dataclasses.fields(cls)]
# Set the attributes from the parsed arguments.
engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
return engine_args
def create_engine_config(
self,
) -> EngineConfig:
# bitsandbytes quantization needs a specific model loader
# so we make sure the quant method and the load format are consistent
if (self.quantization == "bitsandbytes" or
self.qlora_adapter_name_or_path is not None) and \
self.load_format != "bitsandbytes":
raise ValueError("BitsAndBytes quantization and QLoRA adapter only support "
f"'bitsandbytes' load format, but got {self.load_format}")
if (self.load_format == "bitsandbytes" or
self.qlora_adapter_name_or_path is not None) and \
self.quantization != "bitsandbytes":
raise ValueError("BitsAndBytes load format and QLoRA adapter only support "
f"'bitsandbytes' quantization, but got {self.quantization}")
assert self.cpu_offload_gb >= 0, ("CPU offload space must be non-negative"
f", but got {self.cpu_offload_gb}")
multimodal_config = MultiModalConfig()
device_config = DeviceConfig(self.device)
        # NOTE(sgm): we only modify ModelConfig; the other configs are imported from vllm
model_config = ModelConfig(hf_config=self.model_hf_config,
tokenizer_mode=self.tokenizer_mode,
trust_remote_code=self.trust_remote_code,
dtype=self.dtype,
seed=self.seed,
revision=self.revision,
code_revision=self.code_revision,
rope_scaling=self.rope_scaling,
rope_theta=self.rope_theta,
tokenizer_revision=self.tokenizer_revision,
max_model_len=self.max_model_len,
quantization=self.quantization,
quantization_param_path=self.quantization_param_path,
enforce_eager=self.enforce_eager,
max_context_len_to_capture=self.max_context_len_to_capture,
max_seq_len_to_capture=self.max_seq_len_to_capture,
max_logprobs=self.max_logprobs,
disable_sliding_window=self.disable_sliding_window,
skip_tokenizer_init=self.skip_tokenizer_init,
served_model_name=self.served_model_name,
multimodal_config=multimodal_config)
cache_config = CacheConfig(
block_size=self.block_size,
gpu_memory_utilization=self.gpu_memory_utilization,
swap_space=self.swap_space,
cache_dtype=self.kv_cache_dtype,
num_gpu_blocks_override=self.num_gpu_blocks_override,
sliding_window=model_config.get_sliding_window(),
enable_prefix_caching=self.enable_prefix_caching,
cpu_offload_gb=self.cpu_offload_gb,
)
parallel_config = ParallelConfig(pipeline_parallel_size=self.pipeline_parallel_size,
tensor_parallel_size=self.tensor_parallel_size,
worker_use_ray=self.worker_use_ray,
max_parallel_loading_workers=self.max_parallel_loading_workers,
disable_custom_all_reduce=self.disable_custom_all_reduce,
tokenizer_pool_config=TokenizerPoolConfig.create_config(
self.tokenizer_pool_size,
self.tokenizer_pool_type,
self.tokenizer_pool_extra_config,
),
ray_workers_use_nsight=self.ray_workers_use_nsight,
distributed_executor_backend=self.distributed_executor_backend)
# NOTE[VERL]: Use the world_size set by TORCHRUN
        world_size = int(os.getenv("WORLD_SIZE", "-1"))
        assert world_size != -1, "WORLD_SIZE is not set; it is expected to be initialized by TORCHRUN"
        parallel_config.world_size = world_size
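        # Example (illustrative): launching with `torchrun --nproc_per_node=8`
        # sets WORLD_SIZE=8, so parallel_config.world_size becomes 8 and the
        # SPMD workers map onto the torchrun ranks, independently of the
        # tensor_parallel_size passed to vLLM.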
max_model_len = model_config.max_model_len
use_long_context = max_model_len > 32768
if self.enable_chunked_prefill is None:
# If not explicitly set, enable chunked prefill by default for
# long context (> 32K) models. This is to avoid OOM errors in the
# initial memory profiling phase.
if use_long_context:
is_gpu = device_config.device_type == "cuda"
use_sliding_window = (model_config.get_sliding_window() is not None)
use_spec_decode = self.speculative_model is not None
has_seqlen_agnostic_layers = (model_config.contains_seqlen_agnostic_layers(parallel_config))
if (is_gpu and not use_sliding_window and not use_spec_decode and not self.enable_lora and
not self.enable_prompt_adapter and not self.enable_prefix_caching and
not has_seqlen_agnostic_layers):
self.enable_chunked_prefill = True
logger.warning("Chunked prefill is enabled by default for models with "
"max_model_len > 32K. Currently, chunked prefill might "
"not work with some features or models. If you "
"encounter any issues, please disable chunked prefill "
"by setting --enable-chunked-prefill=False.")
if self.enable_chunked_prefill is None:
self.enable_chunked_prefill = False
if not self.enable_chunked_prefill and use_long_context:
logger.warning(
"The model has a long context length (%s). This may cause OOM "
"errors during the initial memory profiling phase, or result "
"in low performance due to small KV cache space. Consider "
"setting --max-model-len to a smaller value.", max_model_len)
# TODO: spec config
speculative_config = SpeculativeConfig.maybe_create_spec_config(
target_model_config=model_config,
target_parallel_config=parallel_config,
target_dtype=self.dtype,
speculative_model=self.speculative_model,
speculative_draft_tensor_parallel_size = \
self.speculative_draft_tensor_parallel_size,
num_speculative_tokens=self.num_speculative_tokens,
speculative_disable_by_batch_size=self.
speculative_disable_by_batch_size,
speculative_max_model_len=self.speculative_max_model_len,
enable_chunked_prefill=self.enable_chunked_prefill,
use_v2_block_manager=self.use_v2_block_manager,
disable_log_stats=self.disable_log_stats,
ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
draft_token_acceptance_method=\
self.spec_decoding_acceptance_method,
typical_acceptance_sampler_posterior_threshold=self.
typical_acceptance_sampler_posterior_threshold,
typical_acceptance_sampler_posterior_alpha=self.
typical_acceptance_sampler_posterior_alpha,
disable_logprobs=self.disable_logprobs_during_spec_decoding,
)
scheduler_config = SchedulerConfig(
max_num_batched_tokens=self.max_num_batched_tokens,
max_num_seqs=self.max_num_seqs,
max_model_len=model_config.max_model_len,
use_v2_block_manager=self.use_v2_block_manager,
num_lookahead_slots=(self.num_lookahead_slots
if speculative_config is None else speculative_config.num_lookahead_slots),
delay_factor=self.scheduler_delay_factor,
enable_chunked_prefill=self.enable_chunked_prefill,
embedding_mode=model_config.embedding_mode,
preemption_mode=self.preemption_mode,
)
lora_config = LoRAConfig(max_lora_rank=self.max_lora_rank,
max_loras=self.max_loras,
fully_sharded_loras=self.fully_sharded_loras,
lora_extra_vocab_size=self.lora_extra_vocab_size,
long_lora_scaling_factors=self.long_lora_scaling_factors,
lora_dtype=self.lora_dtype,
max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras and self.max_cpu_loras > 0 else
None) if self.enable_lora else None
if self.qlora_adapter_name_or_path is not None and \
self.qlora_adapter_name_or_path != "":
if self.model_loader_extra_config is None:
self.model_loader_extra_config = {}
self.model_loader_extra_config["qlora_adapter_name_or_path"] = self.qlora_adapter_name_or_path
load_config = LoadConfig(
load_format=self.load_format,
download_dir=self.download_dir,
model_loader_extra_config=self.model_loader_extra_config,
ignore_patterns=self.ignore_patterns,
)
prompt_adapter_config = PromptAdapterConfig(
max_prompt_adapters=self.max_prompt_adapters,
max_prompt_adapter_token=self.max_prompt_adapter_token) \
if self.enable_prompt_adapter else None
decoding_config = DecodingConfig(guided_decoding_backend=self.guided_decoding_backend)
observability_config = ObservabilityConfig(otlp_traces_endpoint=self.otlp_traces_endpoint)
if (model_config.get_sliding_window() is not None and scheduler_config.chunked_prefill_enabled and
not scheduler_config.use_v2_block_manager):
raise ValueError("Chunked prefill is not supported with sliding window. "
"Set --disable-sliding-window to disable sliding window.")
return EngineConfig(
model_config=model_config,
cache_config=cache_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
device_config=device_config,
lora_config=lora_config,
multimodal_config=multimodal_config,
speculative_config=speculative_config,
load_config=load_config,
decoding_config=decoding_config,
observability_config=observability_config,
prompt_adapter_config=prompt_adapter_config,
)
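# Usage sketch (illustrative, not part of the original file): unlike upstream
# vLLM, this EngineArgs takes an already-loaded HF config instead of a model
# name or path. The model id below is an assumption for the example.
#
#   from transformers import AutoConfig
#
#   hf_config = AutoConfig.from_pretrained("Qwen/Qwen2.5-1.5B")
#   engine_args = EngineArgs(model_hf_config=hf_config,
#                            tensor_parallel_size=2,
#                            dtype="bfloat16",
#                            gpu_memory_utilization=0.5)
#   engine_config = engine_args.create_engine_config()  # requires WORLD_SIZE to be set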
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/config.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/config.py
import enum
import json
from typing import List, Optional, Union
from dataclasses import dataclass, field, fields
import torch
from transformers import PretrainedConfig
from vllm.logger import init_logger
from vllm.model_executor.layers.quantization import get_quantization_config
from vllm.transformers_utils.config import get_hf_text_config
from vllm.utils import is_hip, print_warning_once
# Add for verl
from vllm.config import ModelConfig, _get_and_verify_dtype, _get_and_verify_max_len, get_served_model_name
GPTQMarlinConfig = get_quantization_config("gptq_marlin")
logger = init_logger(__name__)
_GB = 1 << 30
class ModelConfig(ModelConfig):
"""Configuration for the model.
Args:
model: Name or path of the huggingface model to use.
tokenizer: Name or path of the huggingface tokenizer to use.
tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
dtype: Data type for model weights and activations. The "auto" option
will use FP16 precision for FP32 and FP16 models, and BF16 precision
for BF16 models.
seed: Random seed for reproducibility.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id. If unspecified, will use the default
version.
code_revision: The specific revision to use for the model code on
Hugging Face Hub. It can be a branch name, a tag name, or a
commit id. If unspecified, will use the default version.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id. If unspecified, will use
the default version.
max_model_len: Maximum length of a sequence (including prompt and
output). If None, will be derived from the model.
quantization: Quantization method that was used to quantize the model
weights. If None, we assume the model weights are not quantized.
quantization_param_path: Path to JSON file containing scaling factors.
Used to load KV cache scaling factors into the model when KV cache
type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also
be used to load activation and weight scaling factors when the
model dtype is FP8_E4M3 on ROCm.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode (DEPRECATED. Use max_seq_len_to_capture instead).
max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode
skip_tokenizer_init: If true, skip initialization of tokenizer and
detokenizer.
served_model_name: The model name used in metrics tag `model_name`,
matches the model name exposed via the APIs. If multiple model
names provided, the first name will be used. If not specified,
the model name will be the same as `model`.
"""
def __init__(
self,
hf_config: PretrainedConfig,
tokenizer_mode: str,
trust_remote_code: bool,
dtype: Union[str, torch.dtype],
seed: int,
revision: Optional[str] = None,
code_revision: Optional[str] = None,
rope_scaling: Optional[dict] = None,
rope_theta: Optional[float] = None,
tokenizer_revision: Optional[str] = None,
max_model_len: Optional[int] = None,
quantization: Optional[str] = None,
quantization_param_path: Optional[str] = None,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
max_seq_len_to_capture: Optional[int] = None,
max_logprobs: int = 20,
disable_sliding_window: bool = False,
skip_tokenizer_init: bool = False,
served_model_name: Optional[Union[str, List[str]]] = None,
multimodal_config: Optional["MultiModalConfig"] = None,
) -> None:
self.model = hf_config._name_or_path
self.tokenizer = hf_config._name_or_path
# NOTE(sgm): same as open-sourced
self.tokenizer_mode = tokenizer_mode
self.trust_remote_code = trust_remote_code
self.seed = seed
self.revision = revision
self.code_revision = code_revision
self.rope_scaling = rope_scaling
self.rope_theta = rope_theta
# The tokenizer version is consistent with the model version by default.
if tokenizer_revision is None:
self.tokenizer_revision = revision
else:
self.tokenizer_revision = tokenizer_revision
self.quantization = quantization
self.quantization_param_path = quantization_param_path
self.enforce_eager = enforce_eager
if max_context_len_to_capture is not None:
raise ValueError("`max_context_len_to_capture` is deprecated. "
"Use `max_seq_len_to_capture` instead.")
self.max_seq_len_to_capture = max_seq_len_to_capture
self.max_logprobs = max_logprobs
self.disable_sliding_window = disable_sliding_window
self.skip_tokenizer_init = skip_tokenizer_init
# self.hf_config = get_config(model, trust_remote_code, revision)
self.hf_config = hf_config
self.hf_text_config = get_hf_text_config(hf_config)
self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
# self.served_model_name = get_served_model_name(model,
# served_model_name)
# self._verify_load_format()
# self._verify_tokenizer_mode()
if (not self.disable_sliding_window and self.hf_text_config.model_type == "gemma2" and
self.hf_text_config.sliding_window is not None):
print_warning_once("Gemma 2 uses sliding window attention for every odd layer, "
"which is currently not supported by vLLM. Disabling sliding "
"window and capping the max length to the sliding window size "
f"({self.hf_text_config.sliding_window}).")
self.disable_sliding_window = True
self.max_model_len = _get_and_verify_max_len(hf_config=self.hf_text_config,
max_model_len=max_model_len,
disable_sliding_window=self.disable_sliding_window,
sliding_window_len=self.get_hf_config_sliding_window())
self.served_model_name = get_served_model_name(
self.model, # str
served_model_name)
self.multimodal_config = multimodal_config
if not self.skip_tokenizer_init:
self._verify_tokenizer_mode()
self._verify_embedding_mode()
self._verify_quantization()
self._verify_cuda_graph()
class LoadFormat(str, enum.Enum):
AUTO = 'auto'
MEGATRON = "megatron"
HF = "hf"
DTENSOR = 'dtensor'
DUMMY_HF = 'dummy_hf'
DUMMY_MEGATRON = 'dummy_megatron'
DUMMY_DTENSOR = 'dummy_dtensor'
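# Illustrative note (added for clarity): the formats above extend upstream
# vLLM with verl-specific loaders. "megatron", "dtensor", and "hf" sync
# weights from a live training engine, while the "dummy_*" variants create
# randomly initialized weights of the matching layout so that real weights
# can be synced in afterwards.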
# TODO: check whether this is necessary
@dataclass
class LoadConfig:
"""
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
"tensorizer" will use CoreWeave's tensorizer library for
fast weight loading.
"bitsandbytes" will load nf4 type weights.
ignore_patterns: The list of patterns to ignore when loading the model.
Default to "original/**/*" to avoid repeated loading of llama's
checkpoints.
"""
load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
download_dir: Optional[str] = None
model_loader_extra_config: Optional[Union[str, dict]] = field(default_factory=dict)
ignore_patterns: Optional[Union[List[str], str]] = None
def __post_init__(self):
model_loader_extra_config = self.model_loader_extra_config or {}
if isinstance(model_loader_extra_config, str):
self.model_loader_extra_config = json.loads(model_loader_extra_config)
self._verify_load_format()
if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
logger.info("Ignoring the following patterns when downloading weights: %s", self.ignore_patterns)
else:
self.ignore_patterns = ["original/**/*"]
def _verify_load_format(self) -> None:
if not isinstance(self.load_format, str):
return
load_format = self.load_format.lower()
self.load_format = LoadFormat(load_format)
rocm_not_supported_load_format: List[str] = []
if is_hip() and load_format in rocm_not_supported_load_format:
rocm_supported_load_format = [
f for f in LoadFormat.__members__ if (f not in rocm_not_supported_load_format)
]
raise ValueError(f"load format '{load_format}' is not supported in ROCm. "
f"Supported load formats are "
f"{rocm_supported_load_format}")
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/dtensor_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict, Iterable, Tuple
import torch
import torch.nn as nn
from torch.distributed._tensor import DTensor, Shard, Replicate
from vllm.model_executor.layers.linear import *
from vllm.model_executor.models import ModelRegistry
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.utils import is_pp_missing_parameter
def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
for (param_name, shard_name, shard_id) in stacked_params_mapping:
if shard_name not in name:
continue
stacked_name = name.replace(shard_name, param_name)
# Skip loading extra bias for GPTQ models.
if stacked_name.endswith(".bias") and stacked_name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[stacked_name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
            # lm_head is not used in vllm as it is tied with embed_tokens.
            # To prevent errors, skip loading lm_head.weight.
if "lm_head.weight" in name:
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gptbigcode_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
continue
if ".attn.bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def starcoder2_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def llama_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
(".qkv_proj", ".q_proj", "q"),
(".qkv_proj", ".k_proj", "k"),
(".qkv_proj", ".v_proj", "v"),
(".gate_up_proj", ".gate_proj", 0),
(".gate_up_proj", ".up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if ("rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name):
# Models trained using ColossalAI may include these tensors in
# the checkpoint. Skip them.
continue
# With tie_word_embeddings, we can skip lm_head.weight
# The weight might appear unnecessarily in the files if the model is
# processed with quantization, LoRA, fine-tuning, etc.
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
            weight_loader(param, local_loaded_weight.to(dtype=param.dtype))  # cast to the param dtype, consistent with the other loaders
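# Worked example (illustrative) of the stacked-parameter mapping above: a
# checkpoint entry "model.layers.0.self_attn.q_proj.weight" is renamed to
# "model.layers.0.self_attn.qkv_proj.weight" and loaded into the fused QKV
# parameter with shard_id "q"; "gate_proj" and "up_proj" land in
# "gate_up_proj" at shard ids 0 and 1 respectively.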
def qwen2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
# NOTE: imported here, next to its only consumer below, rather than at the top of the file.
from vllm.model_executor.layers.fused_moe import FusedMoE
def deepseekv2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
# Params for weights, fp8 weight scales, fp8 activation scales
# (param_name, weight_name, expert_id, shard_id)
expert_params_mapping = FusedMoE.make_expert_params_mapping(ckpt_gate_proj_name="gate_proj",
ckpt_down_proj_name="down_proj",
ckpt_up_proj_name="up_proj",
num_experts=vllm_model.config.n_routed_experts)
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for (param_name, weight_name, shard_id) in stacked_params_mapping:
# Skip non-stacked layers and experts (experts handled below).
if weight_name not in name:
continue
# We have mlp.experts[0].gate_proj in the checkpoint.
# Since we handle the experts below in expert_params_mapping,
# we need to skip here BEFORE we update the name, otherwise
# name will be updated to mlp.experts[0].gate_up_proj, which
# will then be updated below in expert_params_mapping
# for mlp.experts[0].gate_gate_up_proj, which breaks load.
if (("mlp.experts." in name) and name not in params_dict):
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
for mapping in expert_params_mapping:
param_name, weight_name, expert_id, shard_id = mapping
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param,
local_loaded_weight.to(dtype=param.dtype),
weight_name,
shard_id=shard_id,
expert_id=expert_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gpt2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
    # TODO: GPT2 dtensor weight loading is registered below but not implemented yet.
    pass
def redistribute_dtensor(param_name: str, loaded_weights: DTensor, parallelize_plan: Dict = None):
param_name = _process_parameter_names(name=param_name)
if parallelize_plan is not None:
assert param_name in parallelize_plan.keys(), \
f"param name: {param_name} not in parallelize_plan :{parallelize_plan.keys()}"
placement = parallelize_plan[param_name]
local_loaded_weights = loaded_weights.redistribute(device_mesh=loaded_weights.device_mesh,
placements=placement).to_local()
else:
local_loaded_weights = loaded_weights.full_tensor()
return local_loaded_weights
def _process_parameter_names(name):
# Remove '.weight' if it exists at the end of the string
if name.endswith(".weight"):
name = name[:-7]
# Remove 'model.layers.x.' or 'model.' prefix
if "model.layers" in name:
parts = name.split('.')
# Reconstruct the string without 'model.layers.x.'
name = '.'.join(parts[3:]) # parts[0] is 'model', parts[1] is 'layers', parts[2] is 'x'
elif name.startswith("model."):
name = name[6:] # Remove 'model.'
return name
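# Worked examples (illustrative) of the normalization above:
#   "model.layers.0.self_attn.qkv_proj.weight" -> "self_attn.qkv_proj"
#   "model.embed_tokens.weight"                -> "embed_tokens"
#   "lm_head.weight"                           -> "lm_head"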
__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
'GPT2LMHeadModel': gpt2_dtensor_weight_loader,
'LlamaForCausalLM': llama_dtensor_weight_loader,
'LLaMAForCausalLM': llama_dtensor_weight_loader,
'MistralForCausalLM': llama_dtensor_weight_loader, # mistral is the same as llama in vLLM
'InternLMForCausalLM': llama_dtensor_weight_loader,
'AquilaModel': llama_dtensor_weight_loader,
'AquilaForCausalLM': llama_dtensor_weight_loader,
'Phi3ForCausalLM': llama_dtensor_weight_loader,
'GemmaForCausalLM': gemma_dtensor_weight_loader,
'Gemma2ForCausalLM': gemma_dtensor_weight_loader,
'GPTBigCodeForCausalLM': gptbigcode_dtensor_load_weights,
'Starcoder2ForCausalLM': starcoder2_dtensor_load_weights,
'Qwen2ForCausalLM': qwen2_dtensor_weight_loader,
'DeepseekV2ForCausalLM': deepseekv2_dtensor_weight_loader
}
# NOTE: actor_weights is the actor model's .state_dict(), with DTensor values.
# Load dtensor weights into the vllm model.
def load_dtensor_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
    # NOTE(sgm): to reduce peak memory usage, we offload the vllm model to cpu
    # after init, so we need to move it back to the gpu after syncing the model
    # weights in the first iteration.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__:
return __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__.keys()}")
# NOTE(sgm): we use a per-parameter weight loader in each vllm sub-module
def update_dtensor_weight_loader():
pass
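# Usage sketch (illustrative, not part of the original file): `fsdp_state_dict`
# is an assumed DTensor-valued state dict taken from an FSDP-wrapped actor.
#
#   load_dtensor_weights(fsdp_state_dict, vllm_model)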
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/hf_weight_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict, Union, Optional, Iterable, Tuple
import torch
import torch.nn as nn
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
def update_hf_weight_loader():
    print('no hf weight loader needs to be updated')
return
def load_hf_weights(actor_weights: Dict, vllm_model: nn.Module):
assert isinstance(actor_weights, Dict)
with set_default_torch_dtype(next(vllm_model.parameters()).dtype): # TODO
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in actor_weights.keys():
del actor_weights["lm_head.weight"]
vllm_model.load_weights(actor_weights.items())
for _, module in vllm_model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
vllm_model = vllm_model.cuda()
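# Usage sketch (illustrative, not part of the original file): `actor_state_dict`
# is an assumed plain (fully gathered, non-sharded) state dict of the actor.
#
#   load_hf_weights(actor_state_dict, vllm_model)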
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/llm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py
from contextlib import contextmanager
from typing import ClassVar, List, Optional, Sequence, Union, cast, overload, Dict, Tuple
from tqdm import tqdm
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers import PretrainedConfig
import torch.nn as nn
from .arg_utils import EngineArgs
from .llm_engine_sp import LLMEngine
from vllm import LLM
from vllm.inputs import (PromptInputs, TextPrompt, TokensPrompt, parse_and_batch_prompt)
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.model_executor.guided_decoding import (GuidedDecodingRequest, get_local_guided_decoding_logits_processor)
from vllm.model_executor.guided_decoding.guided_fields import LLMGuidedOptions
from vllm.outputs import EmbeddingRequestOutput, RequestOutput
from vllm.pooling_params import PoolingParams
from vllm.prompt_adapter.request import PromptAdapterRequest
from vllm.sampling_params import SamplingParams
from vllm.transformers_utils.tokenizer import get_cached_tokenizer
from vllm.usage.usage_lib import UsageContext
from vllm.utils import Counter, deprecate_kwargs
import torch
from torch.nn.utils.rnn import pad_sequence
from verl.workers.rollout.tokenizer import HybridEngineBaseTokenizer
class LLM(LLM):
"""An LLM for generating texts from given prompts and sampling parameters.
This class includes a tokenizer, a language model (possibly distributed
across multiple GPUs), and GPU memory space allocated for intermediate
states (aka KV cache). Given a batch of prompts and sampling parameters,
this class generates texts from the model, using an intelligent batching
mechanism and efficient memory management.
NOTE: This class is intended to be used for offline inference. For online
serving, use the `AsyncLLMEngine` class instead.
NOTE: For the comprehensive list of arguments, see `EngineArgs`.
Args:
model: A HuggingFace Transformers model instance.
tokenizer: A HuggingFace Transformers tokenizer instance.
tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
if available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
tensor_parallel_size: The number of GPUs to use for distributed
execution with tensor parallelism.
dtype: The data type for the model weights and activations. Currently,
we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
the `torch_dtype` attribute specified in the model config file.
However, if the `torch_dtype` in the config is `float32`, we will
use `float16` instead.
quantization: The method used to quantize the model weights. Currently,
we support "awq". If None, we assume the model weights are not
quantized and use `dtype` to determine the data type of the weights.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id.
seed: The seed to initialize the random number generator for sampling.
gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
reserve for the model weights, activations, and KV cache. Higher
values will increase the KV cache size and thus improve the model's
throughput. However, if the value is too high, it may cause out-of-
memory (OOM) errors.
swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
This can be used for temporarily storing the states of the requests
when their `best_of` sampling parameters are larger than 1. If all
requests will have `best_of=1`, you can safely set this to 0.
Otherwise, too small values may cause out-of-memory (OOM) errors.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode.
disable_custom_all_reduce: See ParallelConfig
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer],
model_hf_config: PretrainedConfig,
tokenizer_mode: str = "auto",
trust_remote_code: bool = False,
skip_tokenizer_init: bool = False,
tensor_parallel_size: int = 1,
dtype: str = "auto",
quantization: Optional[str] = None,
revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
seed: int = 0,
gpu_memory_utilization: float = 0.9,
swap_space: int = 4,
cpu_offload_gb: float = 0,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
max_seq_len_to_capture: int = 8192,
disable_custom_all_reduce: bool = False,
        load_format: str = 'auto',
**kwargs,
) -> None:
if "disable_log_stats" not in kwargs:
kwargs["disable_log_stats"] = True
engine_args = EngineArgs(
model_hf_config=model_hf_config,
tensor_parallel_size=tensor_parallel_size,
dtype=dtype,
quantization=quantization,
revision=revision,
tokenizer_revision=tokenizer_revision,
seed=seed,
gpu_memory_utilization=gpu_memory_utilization,
swap_space=swap_space,
cpu_offload_gb=cpu_offload_gb,
enforce_eager=enforce_eager,
max_context_len_to_capture=max_context_len_to_capture,
max_seq_len_to_capture=max_seq_len_to_capture,
disable_custom_all_reduce=disable_custom_all_reduce,
load_format=load_format,
skip_tokenizer_init=skip_tokenizer_init,
**kwargs,
)
tokenizer_cls = (PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer)
if not isinstance(tokenizer, tokenizer_cls):
            raise ValueError(
                f"Unexpected tokenizer type: {type(tokenizer)}. Must be "
                "one of the following: PreTrainedTokenizer, PreTrainedTokenizerFast, verl.workers.rollout.HybridEngineBaseTokenizer"
            )
self.llm_engine = LLMEngine.from_engine_args(model, tokenizer, engine_args) # TODO: check usagecontext
self.request_counter = Counter()
def init_cache_engine(self):
self.llm_engine.init_cache_engine()
def free_cache_engine(self):
self.llm_engine.free_cache_engine()
def get_tokenizer(self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
return self.llm_engine.tokenizer
def set_tokenizer(
self,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
) -> None:
self.llm_engine.tokenizer = tokenizer
def _run_engine(self, *, use_tqdm: bool) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
# Initialize tqdm.
if use_tqdm:
num_requests = self.llm_engine.get_num_unfinished_requests()
pbar = tqdm(
total=num_requests,
desc="Processed prompts",
dynamic_ncols=True,
postfix=(f"est. speed input: {0:.2f} toks/s, "
f"output: {0:.2f} toks/s"),
)
# Run the engine.
outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = []
total_in_toks = 0
total_out_toks = 0
while self.llm_engine.has_unfinished_requests():
step_outputs = self.llm_engine.step()
for output in step_outputs:
if output.finished:
outputs.append(output)
if use_tqdm:
if isinstance(output, RequestOutput):
# Calculate tokens only for RequestOutput
total_in_toks += len(output.prompt_token_ids)
in_spd = total_in_toks / pbar.format_dict["elapsed"]
total_out_toks += sum(len(stp.token_ids) for stp in output.outputs)
out_spd = total_out_toks / pbar.format_dict["elapsed"]
pbar.postfix = (f"est. speed input: {in_spd:.2f} toks/s, "
f"output: {out_spd:.2f} toks/s")
pbar.update(1)
if use_tqdm:
pbar.close()
# Sort the outputs by request ID.
        # This is necessary because some requests may finish earlier than
        # requests that were submitted before them.
outputs = sorted(outputs, key=lambda x: int(x.request_id))
return self._post_process_outputs(outputs)
# # NOTE(shengguangming): add for verl
# # TODO(sgm): we can optimize it by making the dataloader yield List[int] without padding.
# def _pre_process_inputs(self, prompt_token_ids: torch.Tensor) -> List[int]:
# # remove the left padding in the prompt token_id
# pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
# non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
# token_ids = prompt_token_ids[non_pad_index:].tolist()
# return token_ids
# NOTE(shengguangming): add for verl
def _post_process_outputs(self, request_outputs: List[RequestOutput]) -> Tuple[torch.Tensor, torch.Tensor]:
output_token_ids = []
logprobs = []
for request_output in request_outputs: # List[RequestOutput]
outputs = request_output.outputs
for output in outputs: # List[CompletionOutput], usually len == 1
output_token_ids.append(torch.tensor(output.token_ids))
                # TODO(shengguangming): can be optimized by rewriting the Sampler._get_logprobs() logic
logprobs_dicts = output.logprobs
if logprobs_dicts is not None:
logprob = []
                    for logprobs_dict, token_id in zip(logprobs_dicts, output.token_ids):
                        logprob.append(logprobs_dict[token_id].logprob)
logprobs.append(torch.tensor(logprob))
pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
output_token_ids = pad_sequence(output_token_ids, batch_first=True, padding_value=pad_token_id)
if len(logprobs) > 0:
logprobs = pad_sequence(logprobs, batch_first=True, padding_value=pad_token_id)
return output_token_ids, logprobs
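    # Illustrative note (added for clarity): the method above returns
    # right-padded tensors of shape [num_outputs, max_response_len]; responses
    # are padded with pad_token_id (falling back to eos_token_id), and the
    # logprobs tensor is padded with the same value.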
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.llm_engine.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.llm_engine.offload_model_weights()
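# Usage sketch (illustrative, not part of the original file): constructing the
# verl LLM wrapper around a live actor module. `actor_module`, `tokenizer`,
# and `hf_config` are assumed to come from the training side.
#
#   llm = LLM(model=actor_module, tokenizer=tokenizer, model_hf_config=hf_config,
#             tensor_parallel_size=2, dtype="bfloat16",
#             gpu_memory_utilization=0.5, load_format="dummy_dtensor")
#   llm.sync_model_weights(actor_module.state_dict(), load_format="dtensor")
#   llm.init_cache_engine()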
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/llm_engine_sp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/llm_engine.py
import torch
from typing import Dict, Optional, Union, Type
import vllm.envs as envs
from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, EngineConfig, LoRAConfig, MultiModalConfig,
ObservabilityConfig, ParallelConfig, PromptAdapterConfig, SchedulerConfig, SpeculativeConfig)
from vllm.core.scheduler import Scheduler
from vllm.engine.output_processor.interfaces import (SequenceGroupOutputProcessor)
from vllm.engine.output_processor.stop_checker import StopChecker
from vllm.executor.executor_base import ExecutorBase
from vllm.inputs import INPUT_REGISTRY, LLMInputs, PromptInputs
from vllm.logger import init_logger
from vllm.transformers_utils.detokenizer import Detokenizer
from vllm.engine.metrics import (LoggingStatLogger, PrometheusStatLogger, StatLoggerBase, Stats)
from vllm.tracing import (SpanAttributes, SpanKind, extract_trace_context, init_tracer)
from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, usage_message)
from vllm.utils import Counter
from vllm.engine.llm_engine import _load_generation_config_dict
from vllm.engine.llm_engine import LLMEngine
from vllm.version import __version__ as VLLM_VERSION
import torch.nn as nn
from .arg_utils import EngineArgs
from .tokenizer import TokenizerGroup
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
_LOCAL_LOGGING_INTERVAL_SEC = 5
class LLMEngine(LLMEngine):
"""An LLM engine that receives requests and generates texts.
This is the main class for the vLLM engine. It receives requests
from clients and generates texts from the LLM. It includes a tokenizer, a
language model (possibly distributed across multiple GPUs), and GPU memory
space allocated for intermediate states (aka KV cache). This class utilizes
iteration-level scheduling and efficient memory management to maximize the
serving throughput.
The `LLM` class wraps this class for offline batched inference and the
`AsyncLLMEngine` class wraps this class for online serving.
NOTE: The config arguments are derived from the `EngineArgs` class. For the
comprehensive list of arguments, see `EngineArgs`.
Args:
model: the actor model initialize outside vllm (add for verl)
tokenizer: the initialized tokenizer (add for verl)
model_config: The configuration related to the LLM model.
cache_config: The configuration related to the KV cache memory
management.
parallel_config: The configuration related to distributed execution.
scheduler_config: The configuration related to the request scheduler.
distributed_init_method: The initialization method for distributed
execution. See `torch.distributed.init_process_group` for details.
placement_group: Ray placement group for distributed execution.
Required for distributed execution.
log_stats: Whether to log statistics.
"""
def __init__(
self,
# NOTE(sgm): first two arguments are added for verl
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: nn.Module,
# NOTE(sgm): vllm original arguments
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig],
speculative_config: Optional[SpeculativeConfig],
decoding_config: Optional[DecodingConfig],
observability_config: Optional[ObservabilityConfig],
prompt_adapter_config: Optional[PromptAdapterConfig],
executor_class: Type[ExecutorBase],
log_stats: bool,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
stat_loggers: Optional[Dict[str, StatLoggerBase]] = None,
) -> None:
logger.info(
"Initializing an LLM engine (v%s) with config: "
"model=%r, speculative_config=%r, tokenizer=%r, "
"skip_tokenizer_init=%s, revision=%s, "
"rope_scaling=%r, rope_theta=%r, tokenizer_revision=%s, "
"trust_remote_code=%s, dtype=%s, max_seq_len=%d, "
"download_dir=%r, load_format=%s, tensor_parallel_size=%d, "
"pipeline_parallel_size=%d, "
"disable_custom_all_reduce=%s, quantization=%s, "
"enforce_eager=%s, kv_cache_dtype=%s, "
"quantization_param_path=%s, device_config=%s, "
"decoding_config=%r, observability_config=%r, "
"seed=%d, served_model_name=%s, use_v2_block_manager=%s, "
"enable_prefix_caching=%s)",
VLLM_VERSION,
model_config.model,
speculative_config,
model_config.tokenizer,
model_config.skip_tokenizer_init,
model_config.revision,
model_config.rope_scaling,
model_config.rope_theta,
model_config.tokenizer_revision,
model_config.trust_remote_code,
model_config.dtype,
model_config.max_model_len,
load_config.download_dir,
load_config.load_format,
parallel_config.tensor_parallel_size,
parallel_config.pipeline_parallel_size,
parallel_config.disable_custom_all_reduce,
model_config.quantization,
model_config.enforce_eager,
cache_config.cache_dtype,
model_config.quantization_param_path,
device_config.device,
decoding_config,
observability_config,
model_config.seed,
model_config.served_model_name,
scheduler_config.use_v2_block_manager,
cache_config.enable_prefix_caching,
)
# TODO(woosuk): Print more configs in debug mode.
self.model_config = model_config
self.cache_config = cache_config
self.lora_config = lora_config
self.multimodal_config = multimodal_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.speculative_config = speculative_config
self.load_config = load_config
self.decoding_config = decoding_config or DecodingConfig()
self.prompt_adapter_config = prompt_adapter_config
self.observability_config = observability_config or ObservabilityConfig()
self.log_stats = log_stats
# self.model = model # should not store the model, it should be deleted
# TODO(shengguangming): maybe we can choose init here or from arguments
if not self.model_config.skip_tokenizer_init:
self.tokenizer = self._init_tokenizer(tokenizer)
self.detokenizer = Detokenizer(self.tokenizer)
else:
self.tokenizer = None
self.detokenizer = None
self.seq_counter = Counter()
self.generation_config_fields = _load_generation_config_dict(model_config)
self.input_processor = INPUT_REGISTRY.create_input_processor(self.model_config)
self.model_executor = executor_class(
model=model, # add for spmd_gpu_executor
model_config=model_config,
cache_config=cache_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
device_config=device_config,
lora_config=lora_config,
multimodal_config=multimodal_config,
speculative_config=speculative_config,
load_config=load_config,
prompt_adapter_config=prompt_adapter_config,
)
# Profile the memory usage and initialize the cache.
if not self.model_config.embedding_mode:
self._initialize_kv_caches()
# If usage stat is enabled, collect relevant info.
if is_usage_stats_enabled():
from vllm.model_executor.model_loader import (get_architecture_class_name)
usage_message.report_usage(
get_architecture_class_name(model_config),
usage_context,
extra_kvs={
# Common configuration
"dtype": str(model_config.dtype),
"tensor_parallel_size": parallel_config.tensor_parallel_size,
"block_size": cache_config.block_size,
"gpu_memory_utilization": cache_config.gpu_memory_utilization,
# Quantization
"quantization": model_config.quantization,
"kv_cache_dtype": str(cache_config.cache_dtype),
# Feature flags
"enable_lora": bool(lora_config),
"enable_prompt_adapter": bool(prompt_adapter_config),
"enable_prefix_caching": cache_config.enable_prefix_caching,
"enforce_eager": model_config.enforce_eager,
"disable_custom_all_reduce": parallel_config.disable_custom_all_reduce,
})
if self.tokenizer:
# Ping the tokenizer to ensure liveness if it runs in a
# different process.
self.tokenizer.ping()
# Create the scheduler.
        # NOTE: the cache_config here has been updated with the number of
        # GPU and CPU blocks, which are profiled in the distributed executor.
self.scheduler = [
Scheduler(scheduler_config, cache_config, lora_config, parallel_config.pipeline_parallel_size)
for _ in range(parallel_config.pipeline_parallel_size)
]
# Metric Logging.
if self.log_stats:
if stat_loggers is not None:
self.stat_loggers = stat_loggers
else:
self.stat_loggers = {
"logging":
LoggingStatLogger(local_interval=_LOCAL_LOGGING_INTERVAL_SEC),
"prometheus":
PrometheusStatLogger(local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
labels=dict(model_name=model_config.served_model_name),
max_model_len=self.model_config.max_model_len),
}
self.stat_loggers["prometheus"].info("cache_config", self.cache_config)
self.tracer = None
if self.observability_config.otlp_traces_endpoint:
self.tracer = init_tracer("vllm.llm_engine", self.observability_config.otlp_traces_endpoint)
# Create sequence output processor, e.g. for beam search or
# speculative decoding.
self.output_processor = (SequenceGroupOutputProcessor.create_output_processor(
self.scheduler_config,
self.detokenizer,
self.scheduler,
self.seq_counter,
self.get_tokenizer_for_seq,
stop_checker=StopChecker(
self.scheduler_config.max_model_len,
self.get_tokenizer_for_seq,
),
))
    # TODO(sgm): added for verl, but we may not need the tokenizer in Rollout
def _init_tokenizer(self, tokenizer, **tokenizer_init_kwargs):
init_kwargs = dict(enable_lora=bool(self.lora_config),
max_num_seqs=self.scheduler_config.max_num_seqs,
max_input_length=None)
init_kwargs.update(tokenizer_init_kwargs)
return TokenizerGroup(tokenizer, **init_kwargs)
def init_cache_engine(self):
# TODO: check whether we should rebuild the CUDAGraph every iter when offload/load KVCache
# Re-capture CUDAGraph would be time-consuming
self.model_executor.init_cache_engine()
def free_cache_engine(self):
self.model_executor.free_cache_engine()
# NOTE(sgm): currently, we only support GPU executor
# The GPUExecutor remove the Ray dependency
@classmethod
def _get_executor_cls(cls, engine_config: EngineConfig) -> Type[ExecutorBase]:
        assert engine_config.device_config.device_type == "cuda", \
            "Currently, vllm in verl only supports running on GPU"
if engine_config.parallel_config.world_size == 1:
engine_config.load_config.load_format = "dummy_hf"
from .spmd_gpu_executor import SPMDGPUExecutor
executor_class = SPMDGPUExecutor
return executor_class
@classmethod
def from_engine_args(
cls,
model,
tokenizer,
engine_args: EngineArgs,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
stat_loggers: Optional[Dict[str, StatLoggerBase]] = None,
) -> "LLMEngine":
"""Creates an LLM engine from the engine arguments."""
# Create the engine configs.
engine_config = engine_args.create_engine_config()
executor_class = cls._get_executor_cls(engine_config)
        # Initialize the cluster and specify the executor class.
        assert engine_config.device_config.device_type == "cuda", \
            "Currently, vllm in verl only supports running on GPU"
# Create the LLM engine.
engine = cls(
model,
tokenizer,
**engine_config.to_dict(),
executor_class=executor_class,
log_stats=not engine_args.disable_log_stats,
usage_context=usage_context,
stat_loggers=stat_loggers,
)
return engine
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.model_executor.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.model_executor.offload_model_weights()
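# ---------------------------------------------------------------------------
# Usage sketch (hedged, not executed): how verl is expected to drive this
# engine across RL iterations. `actor_module`, `hf_tokenizer`, `engine_args`
# and the loop below are illustrative placeholders, not part of this module.
#
#   engine = LLMEngine.from_engine_args(actor_module, hf_tokenizer, engine_args)
#   for _ in range(num_iterations):
#       engine.sync_model_weights(actor_module.state_dict(), load_format='megatron')
#       engine.init_cache_engine()      # rebuild the KV cache for this rollout
#       ...                             # generate rollouts via the wrapping LLM class
#       engine.free_cache_engine()      # release KV cache memory for training
#       engine.offload_model_weights()  # drop the vllm weight copy before the actor update
# ---------------------------------------------------------------------------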
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/megatron_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
from typing import Dict
import torch
import torch.nn as nn
from vllm.model_executor.layers.linear import *
from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding, ParallelLMHead
from vllm.model_executor.layers.activation import ScaledActivation
from vllm.model_executor.models import ModelRegistry
# NOTE(shengguangming): replace the origin weight loader function in the class
def parallel_weight_loader(self, param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Parallel Linear weight loader."""
assert param.size() == loaded_weight.size(
    ), 'the parameter size does not align with the loaded weight size, param size: {}, loaded_weight size: {}'.format(
param.size(), loaded_weight.size())
    assert param.data.dtype == loaded_weight.data.dtype, "if we want to share weights, the data types should also be the same"
param.data = loaded_weight.data
def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Default weight loader."""
assert param.size() == loaded_weight.size()
    assert param.data.dtype == loaded_weight.data.dtype, "if we want to share weights, the data types should also be the same"
param.data = loaded_weight.data
def gpt2_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
# GPT-2 ties the weights of the embedding layer and the final
# linear layer.
continue
if ".attn.bias" in name or ".attn.masked_bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
if not name.startswith("transformer."):
name = "transformer." + name
param = params_dict[name]
# The HF's GPT-2 implementation uses Conv1D instead of Linear.
# Because of this, we need to transpose the weights.
# Note(zhuohan): the logic below might break quantized models.
for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
if conv1d_weight_name not in name:
continue
if not name.endswith(".weight"):
continue
# TODO: check megatron
loaded_weight = loaded_weight.t()
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_te_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv.layer_norm_weight", "input_layernorm.weight"),
("self_attention.linear_qkv.layer_norm_bias", "input_layernorm.bias"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", 'self_attn.o_proj'),
('pre_mlp_layernorm', 'post_attention_layernorm'),
('mlp.linear_fc1.layer_norm_weight', 'post_attention_layernorm.weight'),
('mlp.linear_fc1.layer_norm_bias', 'post_attention_layernorm.bias'),
('mlp.linear_fc1', 'mlp.gate_up_proj'),
('mlp.linear_fc2', 'mlp.down_proj'),
('decoder.final_layernorm', 'model.norm'),
('output_layer', 'lm_head'),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith('.bias') and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", 'self_attn.o_proj'),
        ('input_layernorm', 'input_layernorm'),
('pre_mlp_layernorm', 'post_attention_layernorm'),
('mlp.linear_fc1', 'mlp.gate_up_proj'),
('mlp.linear_fc2', 'mlp.down_proj'),
('decoder.final_layernorm', 'model.norm'),
('output_layer', 'lm_head'),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith('.bias') and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def _replace_name(megatron_name, name_mapping):
for m_name, v_name in name_mapping:
if m_name not in megatron_name:
continue
if 'layers' in megatron_name: # deal with decoder layers
megatron_name = megatron_name.replace('decoder', 'model')
megatron_name_list = megatron_name.split('.')
if 'layer_norm_weight' in megatron_name_list or 'layer_norm_bias' in megatron_name_list:
param_name_list = megatron_name_list[:3]
param_name_list.append(v_name)
param_name = '.'.join(param_name_list)
else:
param_name_list = megatron_name_list[:3]
weight_or_bias = megatron_name_list[-1]
param_name_list.append(v_name)
param_name_list.append(weight_or_bias)
param_name = '.'.join(param_name_list)
return param_name
else:
param_name = megatron_name.replace(m_name, v_name)
return param_name
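# Hedged worked example of _replace_name above; an inert helper that is never
# called at import time, with illustrative Megatron-core parameter names.
def _replace_name_example():
    """Trace _replace_name on typical Megatron-core parameter names."""
    mapping = [("embedding.word_embeddings", "model.embed_tokens"),
               ("self_attention.linear_qkv", "self_attn.qkv_proj")]
    # Decoder-layer weight: 'decoder' becomes 'model' and the trailing
    # '.weight' is re-appended after the mapped vllm module name.
    assert _replace_name('decoder.layers.0.self_attention.linear_qkv.weight',
                         mapping) == 'model.layers.0.self_attn.qkv_proj.weight'
    # Non-layer parameter: a plain substring replacement.
    assert _replace_name('embedding.word_embeddings.weight',
                         mapping) == 'model.embed_tokens.weight'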
def mistral_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# TODO: need to implement a general way to deal with prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
__LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__ = {
ColumnParallelLinear: parallel_weight_loader,
MergedColumnParallelLinear: parallel_weight_loader,
QKVParallelLinear: parallel_weight_loader,
RowParallelLinear: parallel_weight_loader,
VocabParallelEmbedding: parallel_weight_loader,
ParallelLMHead: parallel_weight_loader
# "ScaledActivation.weight_loader": ScaledActivation, # TODO(shengguangming): latest commit in vllm fix awq for this function and add load_weights
# "default_weight_loader": default_weight_loader
}
# for layer_class, weight_loader in __LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__.items():
# # setattr(layer_class, 'megatron_weight_loader', weight_loader)
# layer_class.weight_loader = weight_loader
__MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__ = {
'GPT2LMHeadModel': gpt2_weight_loader,
'LlamaForCausalLM': llama_megatron_weight_loader, # use te backend for open-source megatron
'LLaMAForCausalLM': llama_megatron_weight_loader,
'MistralForCausalLM': mistral_megatron_weight_loader,
}
# Load weights from the Megatron actor (typically its .state_dict())
def load_megatron_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
    # NOTE(sgm): to reduce peak memory usage, we offload the vllm model to cpu
    # after init, so it must be moved back to GPU after syncing model weights in the first iter.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__:
return __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
def update_megatron_weight_loader():
for layer_class, weight_loader in __LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__.items():
layer_class.weight_loader = weight_loader
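# Usage sketch (hedged): how the loaders above are meant to be combined when
# syncing Megatron-partitioned actor weights into vllm. `actor_module` is an
# illustrative placeholder for the training-side model.
#
#   update_megatron_weight_loader()  # patch the vllm parallel layers first
#   load_megatron_weights(
#       actor_weights=dict(actor_module.named_parameters(remove_duplicate=False)),
#       vllm_model=vllm_model)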
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/model_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
from typing import Dict, Union, Optional, Iterable, Tuple
import torch
import torch.nn as nn
from transformers import PreTrainedModel
from vllm.config import (CacheConfig, DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, MultiModalConfig,
ParallelConfig, SchedulerConfig)
from vllm.model_executor.model_loader import BaseModelLoader
from vllm.model_executor.model_loader.loader import _initialize_model
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from vllm.distributed.communication_op import tensor_model_parallel_all_gather
from .config import ModelConfig, LoadFormat, LoadConfig
from .megatron_weight_loaders import load_megatron_weights, update_megatron_weight_loader
from .dtensor_weight_loaders import load_dtensor_weights, update_dtensor_weight_loader
from .hf_weight_loader import update_hf_weight_loader
def get_model(actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
load_config: LoadConfig,
device_config: DeviceConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig],
cache_config: CacheConfig = None) -> nn.Module:
loader = get_model_loader(load_config)
if load_config.load_format.startswith('dummy'):
return loader.load_model(model_config=model_config,
device_config=device_config,
lora_config=lora_config,
multimodal_config=multimodal_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
cache_config=cache_config)
else:
return loader.load_model(actor_model=actor_model,
model_config=model_config,
device_config=device_config,
lora_config=lora_config,
multimodal_config=multimodal_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
cache_config=cache_config)
def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
"""Get a model loader based on the load format."""
if isinstance(load_config.load_format, type):
return load_config.load_format(load_config)
if load_config.load_format == LoadFormat.AUTO:
update_megatron_weight_loader()
return MegatronLoader(load_config)
    # NOTE(sgm): change the weight_loader function at runtime
if load_config.load_format == LoadFormat.MEGATRON:
update_megatron_weight_loader()
return MegatronLoader(load_config)
if load_config.load_format == LoadFormat.HF:
update_hf_weight_loader()
return HFLoader(load_config)
if load_config.load_format == LoadFormat.DTENSOR:
update_dtensor_weight_loader()
return DTensorLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_HF:
update_hf_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_MEGATRON:
update_megatron_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_DTENSOR:
update_dtensor_weight_loader()
return DummyModelLoader(load_config)
    raise ValueError('load format not supported in verl: {}; only {} and {} are supported'.format(
        load_config.load_format, LoadFormat.MEGATRON, LoadFormat.HF))
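# Usage sketch (hedged): the load format selects both the loader class and the
# weight-loader patch applied to vllm's layers, e.g.
#   load_config.load_format = LoadFormat.MEGATRON
#   loader = get_model_loader(load_config)  # patches loaders, returns MegatronLoader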
class DummyModelLoader(BaseModelLoader):
"""Model loader that will set model weights to random values."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def load_model(self, *, model_config: ModelConfig, device_config: DeviceConfig, lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig], parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig, cache_config: CacheConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, multimodal_config, cache_config,
scheduler_config)
                # NOTE(woosuk): upstream vllm assigns random values to the
                # weights for accurate performance evaluation; verl skips this
                # since real weights are synced from the actor afterwards.
                # initialize_dummy_weights(model)
return model.eval()
class MegatronLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
    def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
# NOTE(shengguangming) Load the weights from the actor model
pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(self, actor_model: Union[PreTrainedModel, Dict], model_config: ModelConfig,
device_config: DeviceConfig, lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig], parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig, cache_config: CacheConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, multimodal_config, cache_config,
scheduler_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_megatron_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_megatron_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
            # NOTE(sgm): some weights already point to GPU, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
class HFLoader(BaseModelLoader):
"""Model loader that can load the model weights from model's full params."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
if isinstance(actor_model, Dict):
return actor_model.items()
elif isinstance(actor_model, nn.Module):
return dict(actor_model.named_parameters()).items()
else:
raise ValueError(f'actor model should be Dict or nn.Module, but get {type(actor_model)}')
def load_model(self, actor_model: Union[PreTrainedModel, Dict], model_config: ModelConfig,
device_config: DeviceConfig, lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig], parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig, cache_config: CacheConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
# with torch.device(device_config.device):
# NOTE(sgm): init the model in cpu
model = _initialize_model(model_config, self.load_config, lora_config, multimodal_config, cache_config,
scheduler_config)
model.load_weights(self._get_weights_iterator(actor_model))
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
            # NOTE(sgm): some weights already point to GPU, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
class DTensorLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
    def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
# NOTE(shengguangming) Load the weights from the actor model
pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(self, actor_model: Union[PreTrainedModel, Dict], model_config: ModelConfig,
device_config: DeviceConfig, lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig], parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig, cache_config: CacheConfig) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, multimodal_config, cache_config,
scheduler_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_dtensor_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_dtensor_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
            # NOTE(sgm): some weights already point to GPU, but we still need this.
model = model.cuda() # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
# FIXME(sgm): hack the _get_logits function in vllm v0.5.4
# Upstream vllm uses ray, so the _get_logits result only needs to be returned to
# the driver node and a plain gather is enough. However, we use SPMD instead of a
# central scheduler, so all_gather is required (aligned with v0.2.6)
def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor,
embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
# Get the logits for the next tokens.
logits = torch.matmul(hidden_states, embedding.t())
if embedding_bias is not None:
logits += embedding_bias
logits = tensor_model_parallel_all_gather(logits)
# Remove paddings in vocab (if any).
if logits is not None:
logits = logits[:, :self.org_vocab_size]
return logits
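# Hedged shape note for the hack above: with TP=2 and a padded vocab of size V
# sharded column-wise as [V/2, V/2], the matmul yields per-rank logits of shape
# [num_tokens, V/2]; all_gather restores [num_tokens, V] on every rank so each
# SPMD process can sample locally, after which the padding columns beyond
# org_vocab_size are sliced off.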
from vllm.model_executor.layers.logits_processor import LogitsProcessor
def logitsprocessor_init(self,
vocab_size: int,
org_vocab_size: Optional[int] = None,
scale: float = 1.0,
logits_as_input: bool = False,
soft_cap: Optional[float] = None) -> None:
"""
Args:
scale: A scaling factor to apply to the logits.
"""
super(LogitsProcessor, self).__init__()
self.scale = scale
self.vocab_size = vocab_size
# Whether the input is logits (default is hidden states).
self.logits_as_input = logits_as_input
# original vocabulary size (without LoRA).
self.org_vocab_size = org_vocab_size or vocab_size
# Soft cap the logits. Used in Gemma 2.
self.soft_cap = soft_cap
# Whether to use gather or all-gather to gather the logits.
self.use_gather = False
LogitsProcessor.__init__ = logitsprocessor_init # use all_gather
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/model_runner.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/model_runner.py
import torch
import torch.nn as nn
from enum import IntEnum
from typing import Dict, List, Optional, Set, Tuple, Union
import warnings
import vllm.envs as envs
from vllm.attention import (AttentionMetadata, get_attn_backend)
from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, MultiModalConfig, ParallelConfig, PromptAdapterConfig,
SchedulerConfig)
from vllm.logger import init_logger
from vllm.lora.layers import LoRAMapping
from vllm.lora.request import LoRARequest
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
from vllm.model_executor import SamplingMetadata
from vllm.model_executor.models.interfaces import (supports_lora, supports_vision)
from vllm.utils import (CudaMemoryProfiler, is_hip, is_pin_memory_available)
from vllm.worker.model_runner import ModelRunner, CUDAGraphRunner
from vllm.prompt_adapter.worker_manager import (LRUCacheWorkerPromptAdapterManager)
from .model_loader import get_model
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
# How batches are constructed.
class BatchType(IntEnum):
# Every batch is prefill.
PREFILL = 0
# Every batch is decode.
DECODE = 1
# Batch is a mixture of prefill and decode.
MIXED = 2
class ModelRunner(ModelRunner):
def __init__(
self,
model: Union[nn.Module, Dict], # [verl] model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
cache_config: CacheConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
kv_cache_dtype: Optional[str] = "auto",
is_driver_worker: bool = False,
prompt_adapter_config: Optional[PromptAdapterConfig] = None,
multimodal_config: Optional[MultiModalConfig] = None,
return_hidden_states: bool = False,
):
super().__init__(
model_config,
parallel_config,
scheduler_config,
device_config,
cache_config,
load_config,
lora_config,
kv_cache_dtype,
is_driver_worker=True, # a hack
prompt_adapter_config=prompt_adapter_config,
multimodal_config=multimodal_config,
return_hidden_states=return_hidden_states)
# NOTE(sgm): add for verl
self.model = model # this will be replaced by get_model()
# NOTE(sgm): initialize model using the actor model
def load_model(self) -> None:
logger.info("Starting to load model %s...", self.model_config.model)
with CudaMemoryProfiler() as m:
self.model = get_model(actor_model=self.model,
model_config=self.model_config,
device_config=self.device_config,
lora_config=self.lora_config,
load_config=self.load_config,
parallel_config=self.parallel_config,
scheduler_config=self.scheduler_config,
multimodal_config=self.multimodal_config,
cache_config=self.cache_config)
self.model_memory_usage = m.consumed_memory
logger.info("Loading model weights took %.4f GB", self.model_memory_usage / float(2**30))
if self.lora_config:
assert supports_lora(self.model), "Model does not support LoRA"
assert not supports_vision(self.model), "To be tested: vision language model with LoRA settings."
self.lora_manager = LRUCacheWorkerLoRAManager(
self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens,
self.vocab_size,
self.lora_config,
self.device,
self.model.embedding_modules,
self.model.embedding_padding_modules,
max_position_embeddings=self.model.config.max_position_embeddings,
)
self.model = self.lora_manager.create_lora_manager(self.model)
if self.prompt_adapter_config:
self.prompt_adapter_manager = LRUCacheWorkerPromptAdapterManager(
self.scheduler_config.max_num_seqs, self.scheduler_config.max_num_batched_tokens, self.device,
self.prompt_adapter_config)
self.model = (self.prompt_adapter_manager.create_prompt_adapter_manager(self.model))
if self.kv_cache_dtype == "fp8" and is_hip():
# Currently only ROCm accepts kv-cache scaling factors
# via quantization_param_path and this will be deprecated
# in the future.
if self.model_config.quantization_param_path is not None:
if callable(getattr(self.model, "load_kv_cache_scales", None)):
warnings.warn(
"Loading kv cache scaling factor from JSON is "
"deprecated and will be removed. Please include "
"kv cache scaling factors in the model checkpoint.",
FutureWarning,
stacklevel=2)
self.model.load_kv_cache_scales(self.model_config.quantization_param_path)
logger.info("Loaded KV cache scaling factors from %s", self.model_config.quantization_param_path)
else:
                    raise RuntimeError(
                        "Using FP8 KV cache and scaling factors provided but "
                        f"model {self.model.__class__} does not support loading scaling factors.")
else:
logger.warning("Using FP8 KV cache but no scaling factors "
"provided. Defaulting to scaling factors of 1.0. "
"This may lead to less accurate results!")
if envs.VLLM_TEST_DYNAMO_GRAPH_CAPTURE:
self.model = torch.compile(self.model, fullgraph=True, backend="eager")
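# Hedged sketch: under verl the rollout worker constructs this runner with the
# actor weights (an nn.Module or a state dict) and later calls load_model(),
# which swaps `self.model` for a fully-initialized vllm module, e.g.
#   runner = ModelRunner(actor_state_dict, model_config, parallel_config, ...)
#   runner.load_model()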
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/parallel_state.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Model and data parallel groups."""
import os
import torch
import torch.distributed
from typing import Optional
import vllm.distributed.parallel_state as ps
from vllm.distributed.parallel_state import get_pp_group, get_world_group, init_distributed_environment, init_model_parallel_group
import vllm.envs as envs
from vllm.logger import init_logger
from torch.distributed.device_mesh import init_device_mesh
logger = init_logger(__name__)
"""
This version is strongly tied to Megatron to implement HybridEngine and weight sharing between vllm and Megatron.
- We assume the Megatron tp+dp+pp world is already established before calling this function.
"""
# Device mesh for using DTensor
_DEVICE_MESH = None
# Tensor model parallel group that the current rank belongs to.
_TP = None
# Pipeline model parallel group that the current rank belongs to.
_PP = None
# This method is for initializing the ParallelGroup when using HybridEngine
def initialize_parallel_state(
distributed_init_method: str = "env://",
backend: str = "nccl",
tensor_model_parallel_size: int = 1,
num_tp_per_train_tp: int = 1,
pipeline_model_parallel_size: int = 1,
):
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
    # NOTE(sgm): modified for verl; env vars will be set by TORCHRUN.
rank = int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
init_distributed_environment(world_size, rank, distributed_init_method, local_rank, backend)
if torch.distributed.get_world_size() > 1:
        # NOTE: build a separate inference group with infer tp & micro dp
initialize_model_parallel_for_vllm(tensor_model_parallel_size=tensor_model_parallel_size,
num_tensor_model_parallel_groups_per_train_tp=num_tp_per_train_tp)
else:
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
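# Usage sketch (hedged): verl launches this SPMD world with TORCHRUN, which
# sets RANK/LOCAL_RANK/WORLD_SIZE. For example, with training tp=4 and rollout
# tp=2 on 8 GPUs (launcher line illustrative):
#   torchrun --nproc_per_node=8 main.py
#   initialize_parallel_state(tensor_model_parallel_size=2, num_tp_per_train_tp=2)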
def ensure_model_parallel_initialized(
tensor_model_parallel_size: int,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""Helper to initialize model parallel groups if they are not initialized,
or ensure tensor-parallel and pipeline-parallel sizes are equal to expected
values if the model parallel groups are initialized.
"""
# get the backend of _DEVICE_WORLD_GROUP
backend = backend or torch.distributed.get_backend(get_world_group().device_group)
if not model_parallel_is_initialized():
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
return
assert (get_tensor_model_parallel_world_size() == tensor_model_parallel_size), (
"tensor parallel group already initialized, but of unexpected size: "
f"{get_tensor_model_parallel_world_size()=} vs. "
f"{tensor_model_parallel_size=}")
pp_world_size = get_pp_group().world_size
assert (pp_world_size == pipeline_model_parallel_size), (
"pipeline parallel group already initialized, but of unexpected size: "
f"{pp_world_size=} vs. "
f"{pipeline_model_parallel_size=}")
# TODO(sgm): deviates from v0.5.4; pipeline parallelism is not supported yet
def model_parallel_is_initialized():
"""Check if tensor and pipeline parallel groups are initialized."""
return (ps._TP is not None)
# and _PIPELINE_MODEL_PARALLEL_GROUP is not None)
def initialize_model_parallel_for_vllm(tensor_model_parallel_size: int,
num_tensor_model_parallel_groups_per_train_tp: int = 1,
pipeline_model_parallel_size: int = 1) -> None:
from torch.distributed import new_group
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
assert isinstance(tensor_model_parallel_size, int)
# assert num_tensor_model_parallel_groups_per_train_tp == 1 and not different_tp_group
# assert num_tensor_model_parallel_groups_per_train_tp > 1 and different_tp_group
# Build the tensor model-parallel groups.
assert ps._TP is None, ("tensor model parallel group is already initialized")
global _TP
world_size: int = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
backend = torch.distributed.get_backend()
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
if num_tensor_model_parallel_groups_per_train_tp == 1:
# if tensor_model_parallel_size == train_tensor_parallel_size:
# using the same tp group as Megatron/vllm
assert _TP is None, ("tensor model parallel group is already initialized")
group_ranks = []
for i in range(num_tensor_model_parallel_groups):
ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
group_ranks.append(ranks)
_TP = init_model_parallel_group(
group_ranks=group_ranks,
local_rank=get_world_group().local_rank,
backend=backend,
            use_custom_allreduce=False,  # TODO: check why True does not work in Ray trainer
use_message_queue_broadcaster=True)
ps._TP = _TP
# _MICRO_DATA_PARALLEL_GROUP is move to hybrid engine
else:
# initialize a micro_dp group and a tp group
# assume training tp=4, infer tp=2, then, weight is partitioned as
# [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference
# Build the inference tp groups
# train_tp = train_tensor_parallel_size
train_tp = num_tensor_model_parallel_groups_per_train_tp * tensor_model_parallel_size
# num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size
assert _TP is None, ("tensor model parallel group is already initialized")
group_ranks = []
for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp):
start = train_tp * i
end = train_tp * (i + 1)
for j in range(num_tensor_model_parallel_groups_per_train_tp):
                ranks = [rank + j for rank in range(start, end, num_tensor_model_parallel_groups_per_train_tp)]
group_ranks.append(ranks)
_TP = init_model_parallel_group(
group_ranks=group_ranks,
local_rank=get_world_group().local_rank,
backend=backend,
            use_custom_allreduce=False,  # TODO: check why True does not work in Ray trainer
use_message_queue_broadcaster=True)
ps._TP = _TP
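    # Worked example (hedged): with world_size=8, tensor_model_parallel_size=2
    # and num_tensor_model_parallel_groups_per_train_tp=2, train_tp=4 and the
    # loop above yields group_ranks=[[0, 2], [1, 3], [4, 6], [5, 7]], i.e. each
    # inference TP group strides across the ranks of one training TP group.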
# Build the pipeline model-parallel groups.
# global _PIPELINE_MODEL_PARALLEL_GROUP
# global _PIPELINE_GLOBAL_RANKS
# assert ps._PIPELINE_MODEL_PARALLEL_GROUP is None, ("pipeline model parallel group is already initialized")
# ps._PIPELINE_MODEL_PARALLEL_GROUP = mpu.get_pipeline_model_parallel_group()
# ps._PIPELINE_GLOBAL_RANKS = mpu.get_pipeline_model_parallel_ranks()
# TODO: init using device mesh (not support hybrid engine now)
# Build the pipeline model-parallel groups.
num_pipeline_model_parallel_groups: int = (world_size // pipeline_model_parallel_size)
global _PP
assert _PP is None, ("pipeline model parallel group is already initialized")
group_ranks = []
for i in range(num_pipeline_model_parallel_groups):
ranks = list(range(i, world_size, num_pipeline_model_parallel_groups))
group_ranks.append(ranks)
# pipeline parallel does not need custom allreduce
_PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False)
ps._PP = _PP # for verl
def initialize_model_parallel(
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""
    NOTE: This method is a hack from the open-sourced version without
    assertion of world_size = tp * pp
Initialize model parallel groups.
Arguments:
tensor_model_parallel_size: number of GPUs used for tensor model
parallelism.
pipeline_model_parallel_size: number of GPUs used for pipeline model
parallelism.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
4 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 pipeline model-parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size: int = torch.distributed.get_world_size()
backend = backend or torch.distributed.get_backend(ps.get_world_group().device_group)
# NOTE(sgm) we don't assert world_size == tp * pp
# DP is not managed by vllm but by the verl WorkerGroup
# if (world_size !=
# tensor_model_parallel_size * pipeline_model_parallel_size):
# raise RuntimeError(
# f"world_size ({world_size}) is not equal to "
# f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
# f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
num_tensor_model_parallel_groups: int = (world_size // tensor_model_parallel_size)
rank = torch.distributed.get_rank()
global _TP
assert _TP is None, ("tensor model parallel group is already initialized")
group_ranks = []
for i in range(num_tensor_model_parallel_groups):
ranks = list(range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size))
group_ranks.append(ranks)
# message queue broadcaster is only used in tensor model parallel group
_TP = init_model_parallel_group(
group_ranks,
get_world_group().local_rank,
backend,
        use_custom_allreduce=False,  # TODO: check why True does not work in Ray trainer
use_message_queue_broadcaster=True)
ps._TP = _TP
# TODO: init using device mesh (not support hybrid engine now)
# Build the pipeline model-parallel groups.
num_pipeline_model_parallel_groups: int = (world_size // pipeline_model_parallel_size)
global _PP
assert _PP is None, ("pipeline model parallel group is already initialized")
group_ranks = []
for i in range(num_pipeline_model_parallel_groups):
ranks = list(range(i, world_size, num_pipeline_model_parallel_groups))
group_ranks.append(ranks)
# pipeline parallel does not need custom allreduce
_PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False)
ps._PP = _PP # for verl
"""
Device mesh utilities
"""
def get_device_mesh():
assert _DEVICE_MESH is not None, ("device mesh is not initialized")
return _DEVICE_MESH
"""
Tensor model parallel utilities
"""
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert _TP is not None, ("tensor model parallel group is not initialized")
return _TP.device_group
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
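# Hedged example: with a tensor-parallel world size of 4, global ranks 4..7
# form one TP group, so get_tensor_model_parallel_src_rank() returns 4 for
# global rank 6: (6 // 4) * 4 == 4.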
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/spmd_gpu_executor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/executor/gpu_executor.py
import os
import socket
from typing import Any, Dict, List, Optional, Set, Tuple
import torch
import vllm.envs as envs
from vllm.executor.executor_base import ExecutorBase, ExecutorAsyncBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.sequence import SamplerOutput, ExecuteModelRequest
from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, MultiModalConfig, ParallelConfig, PromptAdapterConfig,
SchedulerConfig, SpeculativeConfig)
from .config import ModelConfig, LoadConfig
logger = init_logger(__name__)
class SPMDGPUExecutor(ExecutorBase):
"""SPMD-based multi-GPU executor implementations."""
def __init__(
self,
model, # pytorch model itself or its parameter dict
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
multimodal_config: Optional[MultiModalConfig],
speculative_config: Optional[SpeculativeConfig],
prompt_adapter_config: Optional[PromptAdapterConfig],
) -> None:
self.model_config = model_config
self.cache_config = cache_config
self.lora_config = lora_config
self.load_config = load_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.multimodal_config = multimodal_config
self.speculative_config = speculative_config
self.prompt_adapter_config = prompt_adapter_config
distributed_init_method = initialize_cluster(parallel_config)
self._init_executor(model, distributed_init_method)
    # TODO(sgm): verl does not support speculative decoding yet
def _init_executor(self, model, distributed_init_method) -> None:
assert (not self.speculative_config), "Speculative decoding not yet supported for multi-GPU backend."
# Create the parallel worker for each GPU.
self._init_workers_sp(model, distributed_init_method)
def _init_workers_sp(self, model, distributed_init_method: str):
# Lazy import the Worker to avoid importing torch.cuda/xformers
# before CUDA_VISIBLE_DEVICES is set in the Worker
from .worker import Worker # pylint: disable=import-outside-toplevel
rank = int(os.getenv("RANK"))
local_rank = int(os.getenv("LOCAL_RANK"))
print(f'local rank {local_rank}')
# see https://github.com/NVIDIA/nccl/issues/1234
os.environ['NCCL_CUMEM_ENABLE'] = '0'
self.worker = Worker(
model,
self.model_config,
self.parallel_config,
self.scheduler_config,
self.device_config,
self.cache_config,
self.load_config,
local_rank,
rank,
distributed_init_method,
lora_config=self.lora_config,
multimodal_config=self.multimodal_config,
speculative_config=None,
            prompt_adapter_config=self.prompt_adapter_config,
is_driver_worker=True,
model_runner_cls=None, # use the default one
)
        # NOTE(shengguangming): torch.distributed.init_process_group will be called inside init_device()
self.worker.init_device()
self.worker.load_model()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Determine the number of available KV blocks.
This invokes `determine_num_available_blocks` on each worker and takes
the min of the results, guaranteeing that the selected cache sizes are
compatible with all workers.
Returns:
- tuple[num_gpu_blocks, num_cpu_blocks]
"""
# Get the maximum number of blocks that can be allocated on GPU and CPU.
num_blocks = self.worker.determine_num_available_blocks()
        # NOTE(shengguangming): now we don't use a shared centralized controller;
        # each process has its own scheduler
num_gpu_blocks = num_blocks[0]
num_cpu_blocks = num_blocks[1]
return num_gpu_blocks, num_cpu_blocks
def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None:
"""Initialize the KV cache in all workers.
"""
# NOTE: We log here to avoid multiple logs when number of workers is
# greater than one. We could log in the engine, but not all executors
# have GPUs.
logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks, num_cpu_blocks)
self.cache_config.num_gpu_blocks = num_gpu_blocks
self.cache_config.num_cpu_blocks = num_cpu_blocks
if torch.distributed.get_rank() == 0:
print(
f'before init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB'
)
self.worker.initialize_cache(num_gpu_blocks=num_gpu_blocks, num_cpu_blocks=num_cpu_blocks)
if torch.distributed.get_rank() == 0:
print(
f'after init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB'
)
# NOTE(sgm): This will not profile & capture the model(CUDAGraph) when rebuilding KVCache
def init_cache_engine(self) -> None:
self.worker._init_cache_engine()
def free_cache_engine(self) -> None:
self.worker.free_cache_engine()
def execute_model(self, execute_model_req) -> List[SamplerOutput]:
all_outputs = self.worker.execute_model(execute_model_req=execute_model_req)
# NOTE(sgm):
# Each GPU in vllm under verl has its own spmd_gpu_executor, therefore all GPUs should return the outputs
# In vllm with ray, only the driver worker returns the sampling results.
return all_outputs
def add_lora(self, lora_request: LoRARequest) -> bool:
assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
return self.worker.add_lora(lora_request=lora_request)
def remove_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.remove_lora(lora_id=lora_id)
def list_loras(self) -> Set[int]:
return self.worker.list_loras()
def check_health(self) -> None:
# SPMDExecutor will always be healthy as long as
# it's running.
return
# NOTE(sgm) add for verl to pass the abstract class test, not used
from vllm.prompt_adapter.request import PromptAdapterRequest
def add_prompt_adapter(self, prompt_adapter_request: PromptAdapterRequest) -> bool:
assert prompt_adapter_request.prompt_adapter_id > 0, \
"prompt_adapter_id must be greater than 0."
return self.worker.add_prompt_adapter(prompt_adapter_request)
def list_prompt_adapters(self) -> Set[int]:
return self.worker.list_prompt_adapters()
def pin_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.pin_lora(lora_id)
def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
assert prompt_adapter_id > 0, \
"prompt_adapter_id must be greater than 0."
return self.worker.pin_prompt_adapter(prompt_adapter_id)
def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
assert prompt_adapter_id > 0, \
"prompt_adapter_id must be greater than 0."
return self.worker.remove_prompt_adapter(prompt_adapter_id)
# NOTE(sgm): add for verl
def offload_model_weights(self) -> None:
self.worker.offload_model_weights()
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.worker.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def initialize_cluster(
    parallel_config: ParallelConfig,
    engine_use_ray: bool = False,
    ray_address: Optional[str] = None,
) -> str:
    """Initialize the distributed cluster (verl uses SPMD without Ray).
    Args:
        parallel_config: The configurations for parallel execution.
    Returns:
        The `distributed_init_method`, the address for initializing the
        distributed backend.
    """
# Initialize cluster locally.
port = get_open_port()
# We need to setup the distributed init method to make sure
# the distributed megatron code (e.g., get world size) works correctly.
# distributed_init_method = f"tcp://localhost:{port}"
distributed_init_method = 'env://'
return distributed_init_method
def get_open_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return s.getsockname()[1]
# TODO(sgm): not implemented async executor yet
class SPMDGPUExecutorAsync(SPMDGPUExecutor, ExecutorAsyncBase):
async def execute_model_async(self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
"""Executes one model step on the given sequences."""
raise NotImplementedError
async def check_health_async(self) -> None:
"""Checks if the executor is healthy. If not, it should raise an
exception."""
self.check_health()
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/transformers_utils/tokenizer_group/tokenizer_group.py
from typing import List, Optional, Tuple, Union
from transformers import (AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast)
from vllm.lora.request import LoRARequest
from vllm.utils import make_async, LRUCache
from vllm.transformers_utils.tokenizers import *
class TokenizerGroup:
"""A group of tokenizers that can be used for LoRA adapters."""
def __init__(self, tokenizer: PreTrainedTokenizer, enable_lora: bool, max_num_seqs: int,
max_input_length: Optional[int]):
self.enable_lora = enable_lora
self.max_input_length = max_input_length
self.tokenizer = tokenizer
self.lora_tokenizers = LRUCache[PreTrainedTokenizer](capacity=max_num_seqs) if enable_lora else None
def ping(self) -> bool:
"""Check if the tokenizer group is alive."""
return True
def get_max_input_len(self, lora_request: Optional[LoRARequest] = None) -> Optional[int]:
"""Get the maximum input length for the LoRA request."""
return self.max_input_length
def encode(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = self.get_lora_tokenizer(lora_request)
return tokenizer.encode(prompt)
async def encode_async(self,
prompt: str,
request_id: Optional[str] = None,
lora_request: Optional[LoRARequest] = None) -> List[int]:
tokenizer = await self.get_lora_tokenizer_async(lora_request)
return tokenizer.encode(prompt)
def get_lora_tokenizer(self, lora_request: Optional[LoRARequest]) -> "PreTrainedTokenizer":
if not lora_request or not self.enable_lora:
return self.tokenizer
if lora_request.lora_int_id not in self.lora_tokenizers:
# TODO(sgm): the lora tokenizer is also passed, but may be different
tokenizer = self.tokenizer
# tokenizer = (get_lora_tokenizer(
# lora_request, **self.tokenizer_config) or self.tokenizer)
self.lora_tokenizers.put(lora_request.lora_int_id, tokenizer)
return tokenizer
else:
return self.lora_tokenizers.get(lora_request.lora_int_id)
# FIXME(sgm): for simplicity, we expose the base tokenizer's special tokens here
@property
def pad_token_id(self):
return self.tokenizer.pad_token_id
@property
def eos_token_id(self):
return self.tokenizer.eos_token_id
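# Illustrative usage sketch (the model id is hypothetical; not executed here):
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Math-1.5B")
#   group = TokenizerGroup(tokenizer=tok, enable_lora=False, max_num_seqs=256,
#                          max_input_length=None)
#   ids = group.encode("1 + 1 =")                 # plain encode, no LoRA lookup
#   assert group.get_lora_tokenizer(None) is tok  # falls back to the base tokenizer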
================================================
FILE: verl/third_party/vllm/vllm_v_0_5_4/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/worker.py
"""A GPU worker class."""
import os
import gc
from typing import Dict, List, Tuple, Optional, Union, Type
import torch
import torch.distributed
import torch.nn as nn
from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, MultiModalConfig, ParallelConfig, PromptAdapterConfig,
SchedulerConfig, SpeculativeConfig)
from vllm.model_executor import set_random_seed
from vllm.sequence import (ExecuteModelRequest, IntermediateTensors, SamplerOutput)
from vllm.worker.cache_engine import CacheEngine
# TODO(sgm): check why vllm has a similar file in vllm.model_executor.parallel_utils.parallel_state
from vllm.distributed import (init_distributed_environment, set_custom_all_reduce, get_tensor_model_parallel_group)
from vllm.worker.worker_base import WorkerInput
from vllm.worker.worker import Worker, _check_if_gpu_supports_dtype
from vllm.worker.model_runner_base import ModelRunnerBase, ModelRunnerInputBase
from vllm.worker.embedding_model_runner import EmbeddingModelRunner
from vllm.worker.model_runner import GPUModelRunnerBase
from .model_runner import ModelRunner
from .megatron_weight_loaders import load_megatron_weights
from .hf_weight_loader import load_hf_weights
from .dtensor_weight_loaders import load_dtensor_weights
from .parallel_state import (ensure_model_parallel_initialized)
from .config import ModelConfig, LoadConfig, LoadFormat
class Worker(Worker):
"""A worker class that executes (a partition of) the model on a GPU.
Each worker is associated with a single GPU. The worker is responsible for
maintaining the KV cache and executing the model on the GPU. In case of
distributed inference, each worker is assigned a partition of the model.
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
cache_config: CacheConfig,
load_config: LoadConfig,
local_rank: int,
rank: int,
distributed_init_method: str,
lora_config: Optional[LoRAConfig] = None,
multimodal_config: Optional[MultiModalConfig] = None,
speculative_config: Optional[SpeculativeConfig] = None,
prompt_adapter_config: Optional[PromptAdapterConfig] = None,
is_driver_worker: bool = False,
model_runner_cls: Optional[Type[GPUModelRunnerBase]] = None,
) -> None:
# self.model = model # will be replaced in the init_model
self.model_config = model_config
self.parallel_config = parallel_config
self.parallel_config.rank = rank
self.scheduler_config = scheduler_config
self.device_config = device_config
self.cache_config = cache_config
self.local_rank = local_rank
self.rank = rank
self.distributed_init_method = distributed_init_method
self.lora_config = lora_config
self.load_config = load_config
self.prompt_adapter_config = prompt_adapter_config
self.is_driver_worker = is_driver_worker # TODO: we don't need a driver worker in SPMD mode
# if parallel_config and is_driver_worker:
# assert rank % parallel_config.tensor_parallel_size == 0, \
# "Driver worker should be rank 0 of tensor parallel group."
if self.model_config.trust_remote_code:
# note: lazy import to avoid importing torch before initializing
from vllm.utils import init_cached_hf_modules
init_cached_hf_modules()
self.multimodal_config = multimodal_config
# Return hidden states from target model if the draft model is an
# mlp_speculator
speculative_args = {} if speculative_config is None \
or (speculative_config.draft_model_config.model ==
model_config.model) \
or (speculative_config.draft_model_config.hf_config.model_type
not in ["medusa", "mlp_speculator"]) \
else {"return_hidden_states": True}
# TODO(sgm): set correct model runner class
ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner
if model_runner_cls is not None:
ModelRunnerClass = model_runner_cls
elif self.model_config.embedding_mode:
ModelRunnerClass = EmbeddingModelRunner
self.model_runner: GPUModelRunnerBase = ModelRunnerClass(
model, # [VERL]: add for verl
model_config,
parallel_config,
scheduler_config,
device_config,
cache_config,
load_config=load_config,
lora_config=self.lora_config,
kv_cache_dtype=self.cache_config.cache_dtype,
is_driver_worker=is_driver_worker,
prompt_adapter_config=prompt_adapter_config,
multimodal_config=multimodal_config,
**speculative_args,
)
# Uninitialized cache engine. Will be initialized by
# initialize_cache.
self.cache_engine: List[CacheEngine] = None
# Initialize gpu_cache as embedding models don't initialize kv_caches
self.gpu_cache: Optional[List[List[torch.Tensor]]] = None
# NOTE(sgm): [VERL] For offloading inference engine params
self.cpu_model = None
def init_device(self) -> None:
if self.device_config.device.type == "cuda":
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# NOTE(sgm): Modified for verl; env vars will be set by TORCHRUN.
self.rank = self.rank if self.rank is not None else int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
self.device = torch.device(f"cuda:{local_rank}")
if self.rank < 0:
raise ValueError("Invalid or unspecified rank.")
torch.cuda.set_device(self.device)
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
self.parallel_config.world_size = world_size
_check_if_gpu_supports_dtype(self.model_config.dtype)
torch.cuda.empty_cache()
self.init_gpu_memory = torch.cuda.mem_get_info()[0]
else:
raise RuntimeError(f"Not support device type: {self.device_config.device}")
# Initialize the distributed environment.
init_worker_distributed_environment(self.parallel_config, self.rank, self.distributed_init_method,
self.local_rank)
# Set random seed.
set_random_seed(self.model_config.seed)
# self.model = get_model(actor_model=self.model, model_config=self.model_config)
@torch.inference_mode()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Profiles the peak memory usage of the model to determine how many
KV blocks may be allocated without OOMs.
The engine will first conduct a profiling of the existing memory usage.
Then, it calculates the maximum possible number of GPU and CPU blocks
that can be allocated with the remaining free memory.
.. tip::
You may limit the usage of GPU memory
by adjusting the `gpu_memory_utilization` parameter.
"""
# Profile the memory usage of the model and get the maximum number of
# cache blocks that can be allocated with the remaining free memory.
torch.cuda.empty_cache()
# torch.cuda.reset_peak_memory_stats()
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
self.model_runner.profile_run()
# Calculate the number of blocks that can be allocated with the
# profiled peak memory.
torch.cuda.synchronize()
free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
peak_memory = total_gpu_memory - free_gpu_memory
assert peak_memory > 0, ("Error in memory profiling. This happens when the GPU memory was "
"not properly cleaned up before initializing the vLLM instance.")
cache_block_size = self.get_cache_block_size_bytes()
# NOTE(sgm) [VERL] use the remaining memory
num_gpu_blocks = int((free_gpu_memory * self.cache_config.gpu_memory_utilization) // cache_block_size)
# num_gpu_blocks = int((total_gpu_memory * self.cache_config.gpu_memory_utilization - peak_memory) // cache_block_size)
num_cpu_blocks = int(self.cache_config.swap_space_bytes // cache_block_size)
num_gpu_blocks = max(num_gpu_blocks, 0)
num_cpu_blocks = max(num_cpu_blocks, 0)
if self.model_runner.lora_manager:
self.model_runner.remove_all_loras()
# NOTE(sgm): Added for [VERL]: synchronize the number of blocks across all ranks
num_gpu_blocks = torch.tensor([num_gpu_blocks], device='cuda')
num_cpu_blocks = torch.tensor([num_cpu_blocks], device='cuda')
torch.distributed.all_reduce(num_gpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group().device_group)
torch.distributed.all_reduce(num_cpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group().device_group)
num_gpu_blocks = num_gpu_blocks.item()
num_cpu_blocks = num_cpu_blocks.item()
gc.collect()
torch.cuda.empty_cache()
return num_gpu_blocks, num_cpu_blocks
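# Worked example of the block arithmetic above (illustrative numbers): with
# 40 GiB free after profiling, gpu_memory_utilization=0.6 and a cache block
# size of 2 MiB, num_gpu_blocks = int(40 GiB * 0.6 // 2 MiB) = 12288. The
# all_reduce with ReduceOp.MIN then keeps the smallest count across the tensor
# parallel group so every rank allocates an identically sized KV cache.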
def _init_cache_engine(self):
if self.cache_engine is None and self.gpu_cache is None:
super()._init_cache_engine()
def free_cache_engine(self):
# requires `enforce_eager=True` so that no CUDA graphs hold references to the cache
self.cache_engine = None
self.gpu_cache = None
# NOTE(sgm): [VERL]: adapt from _execute_model_spmd()
def execute_model(self,
execute_model_req: ExecuteModelRequest,
intermediate_tensors: Optional[IntermediateTensors] = None) -> Optional[List[SamplerOutput]]:
"""
Execute model in Single Program Multiple Data (SPMD) fashion.
All workers take the same request, prepare the input and
execute the model.
"""
assert execute_model_req is not None, ("_execute_model_spmd() requires each worker to take in an "
"ExecuteModelRequest")
worker_input: WorkerInput = self.prepare_worker_input(execute_model_req=execute_model_req)
model_input: ModelRunnerInputBase = (self.model_runner.prepare_model_input(
execute_model_req.seq_group_metadata_list))
# verl.worker.workerbase.WorkerBase
# swap cache
super().execute_worker(worker_input)
# If there is no input, we don't need to execute the model.
if worker_input.num_seq_groups == 0:
return []
return self.model_runner.execute_model(
model_input, self.kv_cache[worker_input.virtual_engine] if self.kv_cache is not None else None,
intermediate_tensors)
# assumes the input is a .state_dict()
def sync_model_weights(self, actor_weights: Dict, load_format: str):
if load_format in [LoadFormat.MEGATRON, LoadFormat.AUTO]:
load_megatron_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.HF:
# full model state dict without sharding
load_hf_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.DTENSOR:
load_dtensor_weights(actor_weights, self.model_runner.model)
def offload_model_weights(self) -> None:
if self.cpu_model is None:
self.cpu_model = {}
for name, params in self.model_runner.model.named_parameters():
self.cpu_model[name] = torch.empty_like(params, device='cpu')
params.data = self.cpu_model[name]
else:
for name, params in self.model_runner.model.named_parameters():
params.data = self.cpu_model[name]
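# Illustrative sketch of the weight life-cycle driven by verl (the surrounding
# trainer loop and variable names are hypothetical; not executed here):
#
#   worker.sync_model_weights(actor_weights=actor_state_dict, load_format="dtensor")
#   # ... run rollouts via execute_model(...) ...
#   worker.offload_model_weights()  # re-points params at CPU buffers, freeing
#                                   # GPU memory until the next sync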
def init_worker_distributed_environment(
parallel_config: ParallelConfig,
rank: int,
distributed_init_method: Optional[str] = "env://",
local_rank: int = -1,
) -> None:
"""Initialize the distributed environment."""
set_custom_all_reduce(not parallel_config.disable_custom_all_reduce)
# NOTE(sgm): using tcp://localhost:xxxx will hang in the HF setting without Megatron
init_distributed_environment(parallel_config.world_size, rank, distributed_init_method, local_rank)
ensure_model_parallel_initialized(tensor_model_parallel_size=parallel_config.tensor_parallel_size,
pipeline_model_parallel_size=parallel_config.pipeline_parallel_size)
# TODO(sgm): check whether need this
# if pynccl_utils.is_initialized():
# pynccl_world_size = pynccl_utils.get_world_size()
# if pynccl_world_size != parallel_config.world_size:
# raise RuntimeError(
# "pynccl is already initialized but the pynccl world "
# "size does not match parallel_config.world_size "
# f"({pynccl_world_size} vs. {parallel_config.world_size}).")
# elif parallel_config.world_size > 1:
# # NOTE(woosuk): We don't initialize pynccl process group when world size
# # is 1.
# # NOTE(kaichao): By default, pynccl is initialized for tp group.
# pynccl_utils.init_process_group(
# group=get_tensor_model_parallel_cpu_group())
# # Initialize a custom fast all-reduce implementation.
# if not parallel_config.disable_custom_all_reduce:
# init_custom_ar()
# A small all_reduce for warmup.
torch.distributed.all_reduce(torch.zeros(1).cuda())
# if pynccl_utils.is_initialized():
# pynccl_utils.all_reduce(torch.zeros(1).cuda())
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/arg_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/arg_utils.py
import os
from dataclasses import dataclass
from transformers import PretrainedConfig
from vllm.config import EngineConfig
from vllm.engine.arg_utils import EngineArgs
from .config import LoadConfig, ModelConfig
@dataclass
class EngineArgs(EngineArgs):
model_hf_config: PretrainedConfig = None # for verl
def __post_init__(self):
pass
def create_model_config(self) -> ModelConfig:
return ModelConfig(
hf_config=self.model_hf_config,
tokenizer_mode=self.tokenizer_mode,
trust_remote_code=self.trust_remote_code,
dtype=self.dtype,
seed=self.seed,
revision=self.revision,
code_revision=self.code_revision,
rope_scaling=self.rope_scaling,
rope_theta=self.rope_theta,
tokenizer_revision=self.tokenizer_revision,
max_model_len=self.max_model_len,
quantization=self.quantization,
quantization_param_path=self.quantization_param_path,
enforce_eager=self.enforce_eager,
max_context_len_to_capture=self.max_context_len_to_capture,
max_seq_len_to_capture=self.max_seq_len_to_capture,
max_logprobs=self.max_logprobs,
disable_sliding_window=self.disable_sliding_window,
skip_tokenizer_init=self.skip_tokenizer_init,
served_model_name=self.served_model_name,
limit_mm_per_prompt=self.limit_mm_per_prompt,
use_async_output_proc=not self.disable_async_output_proc,
override_neuron_config=self.override_neuron_config,
config_format=self.config_format,
mm_processor_kwargs=self.mm_processor_kwargs,
)
def create_load_config(self) -> LoadConfig:
return LoadConfig(
load_format=self.load_format,
download_dir=self.download_dir,
model_loader_extra_config=self.model_loader_extra_config,
ignore_patterns=self.ignore_patterns,
)
def create_engine_config(self) -> EngineConfig:
engine_config = super().create_engine_config()
# NOTE[VERL]: Use the world_size set by torchrun
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
engine_config.parallel_config.world_size = world_size
return engine_config
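# Illustrative sketch (the model id is hypothetical; not executed here): verl
# builds the engine args from an in-memory HF config instead of a model path,
# and create_engine_config() takes the world size from torchrun's WORLD_SIZE:
#
#   from transformers import AutoConfig
#   hf_config = AutoConfig.from_pretrained("Qwen/Qwen2.5-Math-1.5B")
#   args = EngineArgs(model_hf_config=hf_config, enforce_eager=True)
#   engine_config = args.create_engine_config()  # requires WORLD_SIZE to be set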
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/config.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/config.py
import enum
import json
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, List, Optional, Union
from transformers import PretrainedConfig
# Add for verl
from vllm.config import ModelConfig
from vllm.logger import init_logger
from vllm.utils import is_hip
if TYPE_CHECKING:
from vllm.model_executor.model_loader.loader import BaseModelLoader
logger = init_logger(__name__)
class LoadFormat(str, enum.Enum):
AUTO = "auto"
MEGATRON = "megatron"
HF = "hf"
DTENSOR = "dtensor"
DUMMY_HF = "dummy_hf"
DUMMY_MEGATRON = "dummy_megatron"
DUMMY_DTENSOR = "dummy_dtensor"
class ModelConfig(ModelConfig):
def __init__(self, hf_config: PretrainedConfig, *args, **kwargs) -> None:
super().__init__(model=hf_config._name_or_path, tokenizer=hf_config._name_or_path, *args, **kwargs)
self.hf_config = hf_config
@dataclass
class LoadConfig:
"""
download_dir: Directory to download and load the weights, default to the
default cache directory of huggingface.
load_format: The format of the model weights to load:
"auto" will try to load the weights in the safetensors format and
fall back to the pytorch bin format if safetensors format is
not available.
"pt" will load the weights in the pytorch bin format.
"safetensors" will load the weights in the safetensors format.
"npcache" will load the weights in pytorch format and store
a numpy cache to speed up the loading.
"dummy" will initialize the weights with random values, which is
mainly for profiling.
"tensorizer" will use CoreWeave's tensorizer library for
fast weight loading.
"bitsandbytes" will load nf4 type weights.
ignore_patterns: The list of patterns to ignore when loading the model.
Default to "original/**/*" to avoid repeated loading of llama's
checkpoints.
"""
load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
download_dir: Optional[str] = None
model_loader_extra_config: Optional[Union[str, dict]] = field(default_factory=dict)
ignore_patterns: Optional[Union[List[str], str]] = None
def __post_init__(self):
model_loader_extra_config = self.model_loader_extra_config or {}
if isinstance(model_loader_extra_config, str):
self.model_loader_extra_config = json.loads(model_loader_extra_config)
self._verify_load_format()
if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
logger.info("Ignoring the following patterns when downloading weights: %s", self.ignore_patterns)
else:
self.ignore_patterns = ["original/**/*"]
def _verify_load_format(self) -> None:
if not isinstance(self.load_format, str):
return
load_format = self.load_format.lower()
self.load_format = LoadFormat(load_format)
rocm_not_supported_load_format: List[str] = []
if is_hip() and load_format in rocm_not_supported_load_format:
rocm_supported_load_format = [
f for f in LoadFormat.__members__ if (f not in rocm_not_supported_load_format)
]
raise ValueError(f"load format '{load_format}' is not supported in ROCm. "
f"Supported load formats are "
f"{rocm_supported_load_format}")
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/dtensor_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
from typing import Dict
import torch.nn as nn
from torch.distributed._tensor import DTensor
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.utils import is_pp_missing_parameter
def gemma_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
for param_name, shard_name, shard_id in stacked_params_mapping:
if shard_name not in name:
continue
stacked_name = name.replace(shard_name, param_name)
# Skip loading extra bias for GPTQ models.
if stacked_name.endswith(".bias") and stacked_name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[stacked_name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# lm_head is not used in vllm as it is tied with embed_tokens.
# To prevent errors, skip loading lm_head.weight.
if "lm_head.weight" in name:
continue
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
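# Worked example of the stacked-parameter mapping above: a checkpoint entry
# "model.layers.0.self_attn.q_proj.weight" matches ("qkv_proj", "q_proj", "q"),
# so it is written into the fused vLLM parameter
# "model.layers.0.self_attn.qkv_proj.weight" with shard_id="q"; k_proj/v_proj
# land in the same fused tensor under their own shard ids, and gate_proj and
# up_proj are fused into gate_up_proj analogously.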
def gptbigcode_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
continue
if ".attn.bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def starcoder2_dtensor_load_weights(actor_weights: Dict, vllm_model: nn.Module):
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def llama_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
(".qkv_proj", ".q_proj", "q"),
(".qkv_proj", ".k_proj", "k"),
(".qkv_proj", ".v_proj", "v"),
(".gate_up_proj", ".gate_proj", 0),
(".gate_up_proj", ".up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if "rotary_emb.cos_cached" in name or "rotary_emb.sin_cached" in name:
# Models trained using ColossalAI may include these tensors in
# the checkpoint. Skip them.
continue
# With tie_word_embeddings, we can skip lm_head.weight
# The weight might appear unnecessarily in the files if the model is
# processed with quantization, LoRA, fine-tuning, etc.
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight)
def qwen2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def qwen2vl_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("qkv_proj", "q_proj", "q"),
("qkv_proj", "k_proj", "k"),
("qkv_proj", "v_proj", "v"),
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
param = params_dict[name]
weight_loader = param.weight_loader
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
from vllm.model_executor.layers.fused_moe import FusedMoE
def deepseekv2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
stacked_params_mapping = [
# (param_name, shard_name, shard_id)
("gate_up_proj", "gate_proj", 0),
("gate_up_proj", "up_proj", 1),
]
# Params for weights, fp8 weight scales, fp8 activation scales
# (param_name, weight_name, expert_id, shard_id)
expert_params_mapping = FusedMoE.make_expert_params_mapping(
ckpt_gate_proj_name="gate_proj",
ckpt_down_proj_name="down_proj",
ckpt_up_proj_name="up_proj",
num_experts=vllm_model.config.n_routed_experts,
)
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
for param_name, weight_name, shard_id in stacked_params_mapping:
# Skip non-stacked layers and experts (experts handled below).
if weight_name not in name:
continue
# We have mlp.experts[0].gate_proj in the checkpoint.
# Since we handle the experts below in expert_params_mapping,
# we need to skip here BEFORE we update the name, otherwise
# name will be updated to mlp.experts[0].gate_up_proj, which
# will then be updated below in expert_params_mapping
# for mlp.experts[0].gate_gate_up_proj, which breaks load.
if ("mlp.experts." in name) and name not in params_dict:
continue
name = name.replace(weight_name, param_name)
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype), shard_id)
break
else:
for mapping in expert_params_mapping:
param_name, weight_name, expert_id, shard_id = mapping
if weight_name not in name:
continue
name = name.replace(weight_name, param_name)
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(
param,
local_loaded_weight.to(dtype=param.dtype),
weight_name,
shard_id=shard_id,
expert_id=expert_id,
)
break
else:
# Skip loading extra bias for GPTQ models.
if name.endswith(".bias") and name not in params_dict:
continue
if is_pp_missing_parameter(name, vllm_model):
continue
param = params_dict[name]
local_loaded_weight = redistribute_dtensor(param_name=name, loaded_weights=loaded_weight)
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, local_loaded_weight.to(dtype=param.dtype))
def gpt2_dtensor_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
pass
def redistribute_dtensor(param_name: str, loaded_weights: DTensor, parallelize_plan: Dict = None):
param_name = _process_parameter_names(name=param_name)
if parallelize_plan is not None:
assert (
param_name
in parallelize_plan.keys()), f"param name: {param_name} not in parallelize_plan :{parallelize_plan.keys()}"
placement = parallelize_plan[param_name]
local_loaded_weights = loaded_weights.redistribute(device_mesh=loaded_weights.device_mesh,
placements=placement).to_local()
else:
local_loaded_weights = loaded_weights.full_tensor()
return local_loaded_weights
def _process_parameter_names(name):
# Remove '.weight' if it exists at the end of the string
if name.endswith(".weight"):
name = name[:-7]
# Remove 'model.layers.x.' or 'model.' prefix
if "model.layers" in name:
parts = name.split(".")
# Reconstruct the string without 'model.layers.x.'
name = ".".join(parts[3:]) # parts[0] is 'model', parts[1] is 'layers', parts[2] is 'x'
elif name.startswith("model."):
name = name[6:] # Remove 'model.'
return name
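# Worked examples of the name normalization above:
#   "model.layers.0.self_attn.qkv_proj.weight" -> "self_attn.qkv_proj"
#   "model.embed_tokens.weight"                -> "embed_tokens"
#   "lm_head.weight"                           -> "lm_head"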
__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__ = {
"GPT2LMHeadModel": gpt2_dtensor_weight_loader,
"LlamaForCausalLM": llama_dtensor_weight_loader,
"LLaMAForCausalLM": llama_dtensor_weight_loader,
"MistralForCausalLM": llama_dtensor_weight_loader, # mistral is the same as llama in vLLM
"InternLMForCausalLM": llama_dtensor_weight_loader,
"AquilaModel": llama_dtensor_weight_loader,
"AquilaForCausalLM": llama_dtensor_weight_loader,
"Phi3ForCausalLM": llama_dtensor_weight_loader,
"GemmaForCausalLM": gemma_dtensor_weight_loader,
"Gemma2ForCausalLM": gemma_dtensor_weight_loader,
"GPTBigCodeForCausalLM": gptbigcode_dtensor_load_weights,
"Starcoder2ForCausalLM": starcoder2_dtensor_load_weights,
"Qwen2ForCausalLM": qwen2_dtensor_weight_loader,
"DeepseekV2ForCausalLM": deepseekv2_dtensor_weight_loader,
"Qwen2VLForConditionalGeneration": qwen2vl_dtensor_weight_loader,
}
# The actor model is passed as a .state_dict();
# load its DTensor weights into the vLLM model.
def load_dtensor_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
# NOTE(sgm): to reduce peak memory usage, we offload the vllm model to CPU
# after init, so we need to move it back to the GPU after syncing model weights in the first iteration.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__:
return __MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {__MODEL_DTENSOR_WEIGHT_LOADER_REGISTRY__.keys()}")
# NOTE(sgm): we use per-parameter weight loaders in each vLLM submodule, so nothing needs updating here
def update_dtensor_weight_loader():
pass
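# Illustrative sketch (requires an initialized device mesh; variable names are
# hypothetical; not executed here): the actor's FSDP state dict of DTensors is
# gathered per parameter and copied into the vLLM model:
#
#   actor_weights = fsdp_actor_module.state_dict()  # Dict[str, DTensor]
#   load_dtensor_weights(actor_weights, rollout_vllm_model)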
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/hf_weight_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
from typing import Dict
import torch.nn as nn
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
def update_hf_weight_loader():
print("no hf weight loader need to be updated")
return
def load_hf_weights(actor_weights: Dict, vllm_model: nn.Module):
assert isinstance(actor_weights, Dict)
with set_default_torch_dtype(next(vllm_model.parameters()).dtype): # TODO
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in actor_weights.keys():
del actor_weights["lm_head.weight"]
vllm_model.load_weights(actor_weights.items())
for _, module in vllm_model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
vllm_model = vllm_model.cuda()
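# Illustrative sketch (variable names are hypothetical; not executed here): the
# HF path expects a full, unsharded state dict and defers per-parameter
# placement to the model's own load_weights():
#
#   actor_weights = hf_actor_model.state_dict()  # full Dict[str, torch.Tensor]
#   load_hf_weights(actor_weights, vllm_model)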
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/llm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from transformers import PretrainedConfig, PreTrainedTokenizer, PreTrainedTokenizerFast
from verl.workers.rollout.tokenizer import HybridEngineBaseTokenizer
from vllm import LLM
from vllm.outputs import EmbeddingRequestOutput, RequestOutput
from vllm.utils import Counter
from .arg_utils import EngineArgs
from .llm_engine_sp import LLMEngine
class LLM(LLM):
"""An LLM for generating texts from given prompts and sampling parameters.
This class includes a tokenizer, a language model (possibly distributed
across multiple GPUs), and GPU memory space allocated for intermediate
states (aka KV cache). Given a batch of prompts and sampling parameters,
this class generates texts from the model, using an intelligent batching
mechanism and efficient memory management.
NOTE: This class is intended to be used for offline inference. For online
serving, use the `AsyncLLMEngine` class instead.
NOTE: For the comprehensive list of arguments, see `EngineArgs`.
Args:
model: A HuggingFace Transformers model instance.
tokenizer: A HuggingFace Transformers tokenizer instance.
tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
if available, and "slow" will always use the slow tokenizer.
trust_remote_code: Trust remote code (e.g., from HuggingFace) when
downloading the model and tokenizer.
tensor_parallel_size: The number of GPUs to use for distributed
execution with tensor parallelism.
dtype: The data type for the model weights and activations. Currently,
we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
the `torch_dtype` attribute specified in the model config file.
However, if the `torch_dtype` in the config is `float32`, we will
use `float16` instead.
quantization: The method used to quantize the model weights. Currently,
we support "awq". If None, we assume the model weights are not
quantized and use `dtype` to determine the data type of the weights.
revision: The specific model version to use. It can be a branch name,
a tag name, or a commit id.
tokenizer_revision: The specific tokenizer version to use. It can be a
branch name, a tag name, or a commit id.
seed: The seed to initialize the random number generator for sampling.
gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
reserve for the model weights, activations, and KV cache. Higher
values will increase the KV cache size and thus improve the model's
throughput. However, if the value is too high, it may cause out-of-
memory (OOM) errors.
swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
This can be used for temporarily storing the states of the requests
when their `best_of` sampling parameters are larger than 1. If all
requests will have `best_of=1`, you can safely set this to 0.
Otherwise, too small values may cause out-of-memory (OOM) errors.
enforce_eager: Whether to enforce eager execution. If True, we will
disable CUDA graph and always execute the model in eager mode.
If False, we will use CUDA graph and eager execution in hybrid.
max_context_len_to_capture: Maximum context len covered by CUDA graphs.
When a sequence has context length larger than this, we fall back
to eager mode.
disable_custom_all_reduce: See ParallelConfig
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer],
model_hf_config: PretrainedConfig,
tokenizer_mode: str = "auto",
trust_remote_code: bool = False,
skip_tokenizer_init: bool = False,
tensor_parallel_size: int = 1,
dtype: str = "auto",
quantization: Optional[str] = None,
revision: Optional[str] = None,
tokenizer_revision: Optional[str] = None,
seed: int = 0,
gpu_memory_utilization: float = 0.9,
swap_space: int = 4,
cpu_offload_gb: float = 0,
enforce_eager: bool = False,
max_context_len_to_capture: Optional[int] = None,
max_seq_len_to_capture: int = 8192,
disable_custom_all_reduce: bool = False,
load_format="auto",
**kwargs,
) -> None:
if "disable_log_stats" not in kwargs:
kwargs["disable_log_stats"] = True
removed_vision_keys = ("image_token_id", "image_feature_size", "image_input_shape", "image_input_type")
if any(k in kwargs for k in removed_vision_keys):
raise TypeError("There is no need to pass vision-related arguments anymore.")
engine_args = EngineArgs(
model_hf_config=model_hf_config,
# tokenizer=tokenizer,
tokenizer_mode=tokenizer_mode,
skip_tokenizer_init=skip_tokenizer_init,
trust_remote_code=trust_remote_code,
tensor_parallel_size=tensor_parallel_size,
dtype=dtype,
quantization=quantization,
revision=revision,
tokenizer_revision=tokenizer_revision,
seed=seed,
gpu_memory_utilization=gpu_memory_utilization,
swap_space=swap_space,
cpu_offload_gb=cpu_offload_gb,
enforce_eager=enforce_eager,
max_context_len_to_capture=max_context_len_to_capture,
max_seq_len_to_capture=max_seq_len_to_capture,
disable_custom_all_reduce=disable_custom_all_reduce,
load_format=load_format,
**kwargs,
)
tokenizer_cls = (PreTrainedTokenizer, PreTrainedTokenizerFast, HybridEngineBaseTokenizer)
if not isinstance(tokenizer, tokenizer_cls):
raise ValueError(
f"Unexpected tokenizer type: {type(tokenizer)}. Must be"
"one of the following: PreTrainedTokenizer, PreTrainedTokenizerFast, verl.workers.rollout.HybridEngineBaseTokenizer"
)
self.llm_engine = LLMEngine.from_engine_args(model, tokenizer, engine_args) # TODO: check usage context
self.request_counter = Counter()
def init_cache_engine(self):
self.llm_engine.init_cache_engine()
def free_cache_engine(self):
self.llm_engine.free_cache_engine()
def get_tokenizer(self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
return self.llm_engine.tokenizer
def set_tokenizer(
self,
tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
) -> None:
self.llm_engine.tokenizer = tokenizer
def _run_engine(self, *, use_tqdm: bool) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
outputs = super()._run_engine(use_tqdm=use_tqdm)
return self._post_process_outputs(outputs)
# # NOTE(shengguangming): add for verl
# # TODO(sgm): we can optimize it by making the dataloader yield List[int] without padding.
# def _pre_process_inputs(self, prompt_token_ids: torch.Tensor) -> List[int]:
# # remove the left padding in the prompt token_id
# pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
# non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
# token_ids = prompt_token_ids[non_pad_index:].tolist()
# return token_ids
# NOTE(shengguangming): add for verl
def _post_process_outputs(self, request_outputs: List[RequestOutput]) -> Tuple[torch.Tensor, torch.Tensor]:
output_token_ids = []
logprobs = []
for request_output in request_outputs: # List[RequestOutput]
outputs = request_output.outputs
for output in outputs: # List[CompletionOutput], usually len == 1
output_token_ids.append(torch.tensor(output.token_ids))
# TODO(shengguangming): can be optimized by rewriting the Sampler._get_logprobs() logic
logprobs_dicts = output.logprobs
if logprobs_dicts is not None:
logprob = []
for logprobs_dict, token_id in zip(logprobs_dicts, output.token_ids):
logprob.append(logprobs_dict[token_id].logprob)
logprobs.append(torch.tensor(logprob))
pad_token_id = (self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None
else self.llm_engine.tokenizer.eos_token_id)
output_token_ids = pad_sequence(output_token_ids, batch_first=True, padding_value=pad_token_id)
if len(logprobs) > 0:
logprobs = pad_sequence(logprobs, batch_first=True, padding_value=pad_token_id)
return output_token_ids, logprobs
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.llm_engine.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.llm_engine.offload_model_weights()
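# Illustrative sketch of how verl drives this wrapper (variable names and the
# sampling setup are hypothetical; not executed here):
#
#   llm = LLM(model=actor_state_dict, tokenizer=hf_tokenizer,
#             model_hf_config=hf_config, enforce_eager=True)
#   llm.sync_model_weights(actor_weights=actor_state_dict, load_format="dtensor")
#   token_ids, logprobs = llm.generate(prompts, sampling_params)
#   # generate() routes through the overridden _run_engine(), so the result is
#   # the padded (token_ids, logprobs) pair from _post_process_outputs
#   llm.offload_model_weights()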
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/llm_engine_sp.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/engine/llm_engine.py
from functools import partial
from typing import Callable, Dict, Optional, Type, Union
import torch
import torch.nn as nn
from vllm.config import (
CacheConfig,
DecodingConfig,
DeviceConfig,
EngineConfig,
LoadConfig,
LoRAConfig,
ModelConfig,
ObservabilityConfig,
ParallelConfig,
PromptAdapterConfig,
SchedulerConfig,
SpeculativeConfig,
)
from vllm.core.scheduler import Scheduler
from vllm.engine.arg_utils import EngineArgs
from vllm.engine.llm_engine import LLMEngine, SchedulerContext, SchedulerOutputState, _load_generation_config_dict
from vllm.engine.metrics_types import StatLoggerBase
from vllm.engine.output_processor.interfaces import SequenceGroupOutputProcessor
from vllm.engine.output_processor.stop_checker import StopChecker
from vllm.executor.executor_base import ExecutorBase
from vllm.inputs import INPUT_REGISTRY, InputRegistry
from vllm.inputs.preprocess import InputPreprocessor
from vllm.logger import init_logger
from vllm.sequence import Sequence
from vllm.tracing import init_tracer
from vllm.transformers_utils.detokenizer import Detokenizer
from vllm.transformers_utils.tokenizer import AnyTokenizer
from vllm.usage.usage_lib import UsageContext, is_usage_stats_enabled, usage_message
from vllm.utils import Counter, weak_bind
from vllm.version import __version__ as VLLM_VERSION
from .arg_utils import EngineArgs
from .config import LoadConfig, ModelConfig
from .tokenizer import TokenizerGroup
logger = init_logger(__name__)
_LOCAL_LOGGING_INTERVAL_SEC = 5
class LLMEngine(LLMEngine):
"""An LLM engine that receives requests and generates texts.
This is the main class for the vLLM engine. It receives requests
from clients and generates texts from the LLM. It includes a tokenizer, a
language model (possibly distributed across multiple GPUs), and GPU memory
space allocated for intermediate states (aka KV cache). This class utilizes
iteration-level scheduling and efficient memory management to maximize the
serving throughput.
The :class:`~vllm.LLM` class wraps this class for offline batched inference
and the :class:`AsyncLLMEngine` class wraps this class for online serving.
The config arguments are derived from :class:`~vllm.EngineArgs`. (See
:ref:`engine_args`)
Args:
model_config: The configuration related to the LLM model.
cache_config: The configuration related to the KV cache memory
management.
parallel_config: The configuration related to distributed execution.
scheduler_config: The configuration related to the request scheduler.
device_config: The configuration related to the device.
lora_config (Optional): The configuration related to serving multi-LoRA.
speculative_config (Optional): The configuration related to speculative
decoding.
executor_class: The model executor class for managing distributed
execution.
prompt_adapter_config (Optional): The configuration related to serving
prompt adapters.
log_stats: Whether to log statistics.
usage_context: Specified entry point, used for usage info collection.
"""
def __init__(
self,
# NOTE(sgm): first two arguments are added for verl
model: Union[nn.Module, Dict], # model itself or its parameter dict
tokenizer: nn.Module,
# NOTE(sgm): vllm original arguments
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
speculative_config: Optional[SpeculativeConfig],
decoding_config: Optional[DecodingConfig],
observability_config: Optional[ObservabilityConfig],
prompt_adapter_config: Optional[PromptAdapterConfig],
executor_class: Type[ExecutorBase],
log_stats: bool,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
stat_loggers: Optional[Dict[str, StatLoggerBase]] = None,
input_registry: InputRegistry = INPUT_REGISTRY,
use_cached_outputs: bool = False,
) -> None:
logger.info(
"Initializing an LLM engine (v%s) with config: "
"model=%r, speculative_config=%r, tokenizer=%r, "
"skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, "
"override_neuron_config=%s, "
"rope_scaling=%r, rope_theta=%r, tokenizer_revision=%s, "
"trust_remote_code=%s, dtype=%s, max_seq_len=%d, "
"download_dir=%r, load_format=%s, tensor_parallel_size=%d, "
"pipeline_parallel_size=%d, "
"disable_custom_all_reduce=%s, quantization=%s, "
"enforce_eager=%s, kv_cache_dtype=%s, "
"quantization_param_path=%s, device_config=%s, "
"decoding_config=%r, observability_config=%r, "
"seed=%d, served_model_name=%s, use_v2_block_manager=%s, "
"num_scheduler_steps=%d, chunked_prefill_enabled=%s "
"multi_step_stream_outputs=%s, enable_prefix_caching=%s, "
"use_async_output_proc=%s, use_cached_outputs=%s, "
"mm_processor_kwargs=%s)",
VLLM_VERSION,
model_config.model,
speculative_config,
model_config.tokenizer,
model_config.skip_tokenizer_init,
model_config.tokenizer_mode,
model_config.revision,
model_config.override_neuron_config,
model_config.rope_scaling,
model_config.rope_theta,
model_config.tokenizer_revision,
model_config.trust_remote_code,
model_config.dtype,
model_config.max_model_len,
load_config.download_dir,
load_config.load_format,
parallel_config.tensor_parallel_size,
parallel_config.pipeline_parallel_size,
parallel_config.disable_custom_all_reduce,
model_config.quantization,
model_config.enforce_eager,
cache_config.cache_dtype,
model_config.quantization_param_path,
device_config.device,
decoding_config,
observability_config,
model_config.seed,
model_config.served_model_name,
scheduler_config.use_v2_block_manager,
scheduler_config.num_scheduler_steps,
scheduler_config.chunked_prefill_enabled,
scheduler_config.multi_step_stream_outputs,
cache_config.enable_prefix_caching,
model_config.use_async_output_proc,
use_cached_outputs,
model_config.mm_processor_kwargs,
)
# TODO(woosuk): Print more configs in debug mode.
self.model_config = model_config
self.cache_config = cache_config
self.lora_config = lora_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.speculative_config = speculative_config
self.load_config = load_config
self.decoding_config = decoding_config or DecodingConfig()
self.prompt_adapter_config = prompt_adapter_config
self.observability_config = observability_config or ObservabilityConfig()
self.log_stats = log_stats
self.use_cached_outputs = use_cached_outputs
if not self.model_config.skip_tokenizer_init:
self.tokenizer = self._init_tokenizer(tokenizer)
self.detokenizer = Detokenizer(self.tokenizer)
tokenizer_group = self.get_tokenizer_group()
else:
self.tokenizer = None
self.detokenizer = None
tokenizer_group = None
# Ensure that the function doesn't contain a reference to self,
# to avoid engine GC issues
def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer:
assert tokenizer_group, "tokenizer_group cannot be None, " "make sure skip_tokenizer_init is False"
return tokenizer_group.get_lora_tokenizer(sequence.lora_request)
self.seq_counter = Counter()
self.generation_config_fields = _load_generation_config_dict(model_config)
self.input_preprocessor = InputPreprocessor(model_config, self.tokenizer)
self.input_registry = input_registry
self.input_processor = input_registry.create_input_processor(model_config)
self.model_executor = executor_class(
model=model, # add for spmd_gpu_executor
model_config=model_config,
cache_config=cache_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
device_config=device_config,
lora_config=lora_config,
speculative_config=speculative_config,
load_config=load_config,
prompt_adapter_config=prompt_adapter_config,
observability_config=self.observability_config,
)
if not self.model_config.embedding_mode:
self._initialize_kv_caches()
# If usage stat is enabled, collect relevant info.
if is_usage_stats_enabled():
from vllm.model_executor.model_loader import get_architecture_class_name
usage_message.report_usage(
get_architecture_class_name(model_config),
usage_context,
extra_kvs={
# Common configuration
"dtype": str(model_config.dtype),
"tensor_parallel_size": parallel_config.tensor_parallel_size,
"block_size": cache_config.block_size,
"gpu_memory_utilization": cache_config.gpu_memory_utilization,
# Quantization
"quantization": model_config.quantization,
"kv_cache_dtype": str(cache_config.cache_dtype),
# Feature flags
"enable_lora": bool(lora_config),
"enable_prompt_adapter": bool(prompt_adapter_config),
"enable_prefix_caching": cache_config.enable_prefix_caching,
"enforce_eager": model_config.enforce_eager,
"disable_custom_all_reduce": parallel_config.disable_custom_all_reduce,
},
)
if self.tokenizer:
# Ping the tokenizer to ensure liveness if it runs in a
# different process.
self.tokenizer.ping()
self.cached_scheduler_outputs = [
SchedulerOutputState() for _ in range(self.parallel_config.pipeline_parallel_size)
]
self.scheduler_contexts = [
SchedulerContext(multi_step_stream_outputs=self.scheduler_config.multi_step_stream_outputs)
for _ in range(self.parallel_config.pipeline_parallel_size)
]
if model_config.use_async_output_proc:
process_model_outputs = weak_bind(self._process_model_outputs)
self.async_callbacks = [
partial(process_model_outputs, ctx=self.scheduler_contexts[v_id])
for v_id in range(self.parallel_config.pipeline_parallel_size)
]
else:
self.async_callbacks = []
# Currently used by AsyncLLMEngine to ensure quick append
# of request outputs to asyncio queues
self.process_request_outputs_callback: Optional[Callable] = None
# Create the scheduler.
# NOTE: the cache_config here has been updated with the numbers of
# GPU and CPU blocks, which are profiled in the distributed executor.
self.scheduler = [
Scheduler(
scheduler_config,
cache_config,
lora_config,
parallel_config.pipeline_parallel_size,
self.async_callbacks[v_id] if model_config.use_async_output_proc else None,
) for v_id in range(parallel_config.pipeline_parallel_size)
]
# Metric Logging.
if self.log_stats:
if stat_loggers is not None:
self.stat_loggers = stat_loggers
else:
# Lazy import for prometheus multiprocessing.
# We need to set PROMETHEUS_MULTIPROC_DIR environment variable
# before prometheus_client is imported.
# See https://prometheus.github.io/client_python/multiprocess/
from vllm.engine.metrics import LoggingStatLogger, PrometheusStatLogger
self.stat_loggers = {
"logging":
LoggingStatLogger(local_interval=_LOCAL_LOGGING_INTERVAL_SEC),
"prometheus":
PrometheusStatLogger(
local_interval=_LOCAL_LOGGING_INTERVAL_SEC,
labels=dict(model_name=model_config.served_model_name),
max_model_len=self.model_config.max_model_len,
),
}
self.stat_loggers["prometheus"].info("cache_config", self.cache_config)
self.tracer = None
if self.observability_config.otlp_traces_endpoint:
self.tracer = init_tracer("vllm.llm_engine", self.observability_config.otlp_traces_endpoint)
# Create sequence output processor, e.g. for beam search or
# speculative decoding.
self.output_processor = SequenceGroupOutputProcessor.create_output_processor(
self.scheduler_config,
self.detokenizer,
self.scheduler,
self.seq_counter,
get_tokenizer_for_seq,
stop_checker=StopChecker(
self.scheduler_config.max_model_len,
get_tokenizer_for_seq,
),
)
# TODO(sgm): added for verl, but we may not need the tokenizer in Rollout
def _init_tokenizer(self, tokenizer, **tokenizer_init_kwargs):
init_kwargs = dict(enable_lora=bool(self.lora_config),
max_num_seqs=self.scheduler_config.max_num_seqs,
max_input_length=None)
init_kwargs.update(tokenizer_init_kwargs)
return TokenizerGroup(tokenizer, **init_kwargs)
def init_cache_engine(self):
# TODO: check whether we should rebuild the CUDAGraph every iteration when offloading/loading the KVCache
# Re-capturing the CUDAGraph would be time-consuming
self.model_executor.init_cache_engine()
def free_cache_engine(self):
self.model_executor.free_cache_engine()
# NOTE(sgm): currently, we only support the GPU executor
# The GPUExecutor removes the Ray dependency
@classmethod
def _get_executor_cls(cls, engine_config: EngineConfig) -> Type[ExecutorBase]:
distributed_executor_backend = engine_config.parallel_config.distributed_executor_backend
# Initialize the cluster and specify the executor class.
assert (engine_config.device_config.device_type == "cuda"
), "Currently, the vllm in verl only supports running on GPUs"
if engine_config.parallel_config.world_size == 1:
engine_config.load_config.load_format = "dummy_hf"
from .spmd_gpu_executor import SPMDGPUExecutor
executor_class = SPMDGPUExecutor
return executor_class
@classmethod
def from_engine_args(
cls,
model,
tokenizer,
engine_args: EngineArgs,
usage_context: UsageContext = UsageContext.ENGINE_CONTEXT,
stat_loggers: Optional[Dict[str, StatLoggerBase]] = None,
) -> "LLMEngine":
"""Creates an LLM engine from the engine arguments."""
# Create the engine configs.
engine_config = engine_args.create_engine_config()
executor_class = cls._get_executor_cls(engine_config)
# Create the LLM engine.
engine = cls(
model,
tokenizer,
**engine_config.to_dict(),
executor_class=executor_class,
log_stats=not engine_args.disable_log_stats,
usage_context=usage_context,
stat_loggers=stat_loggers,
)
return engine
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.model_executor.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def offload_model_weights(self) -> None:
self.model_executor.offload_model_weights()
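# Illustrative sketch (not part of the original file): how a hybrid-engine
# rollout step might drive the verl-specific hooks above. `engine` and
# `actor_state_dict` are hypothetical placeholders; sync_model_weights,
# init_cache_engine, free_cache_engine and offload_model_weights are the
# real methods defined in this class.
#
#     engine.sync_model_weights(actor_weights=actor_state_dict, load_format="megatron")
#     engine.init_cache_engine()       # rebuild the KV cache freed after the last iteration
#     ...                              # generate rollouts via the LLM wrapper
#     engine.free_cache_engine()       # release the KV cache before training resumes
#     engine.offload_model_weights()   # offload vllm weights to free GPU memory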
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/megatron_weight_loaders.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/model_loader
from typing import Dict
import torch
import torch.nn as nn
from vllm.model_executor.layers.linear import *
from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead, VocabParallelEmbedding
from vllm.model_executor.models import ModelRegistry
# NOTE(shengguangming): replace the origin weight loader function in the class
def parallel_weight_loader(self, param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Parallel Linear weight loader."""
assert param.size() == loaded_weight.size(), \
    "parameter size does not match the loaded weight size, param size: {}, loaded_weight size: {}".format(
        param.size(), loaded_weight.size())
assert param.data.dtype == loaded_weight.data.dtype, \
    "to share weights, the data types must also match"
param.data = loaded_weight.data
def default_weight_loader(param: torch.Tensor, loaded_weight: torch.Tensor) -> None:
"""Default weight loader."""
assert param.size() == loaded_weight.size()
assert param.data.dtype == loaded_weight.data.dtype, \
    "to share weights, the data types must also match"
param.data = loaded_weight.data
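# Note on both loaders above: rebinding `param.data` makes the vllm parameter
# share storage with the loaded (Megatron-sharded) tensor rather than copying
# it, which is why the size and dtype asserts must hold exactly.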
def gpt2_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_dict = dict(vllm_model.named_parameters(remove_duplicate=False))
for name, loaded_weight in actor_weights.items():
if "lm_head.weight" in name:
# GPT-2 ties the weights of the embedding layer and the final
# linear layer.
continue
if ".attn.bias" in name or ".attn.masked_bias" in name:
# Skip attention mask.
# NOTE: "c_attn.bias" should not be skipped.
continue
if not name.startswith("transformer."):
name = "transformer." + name
param = params_dict[name]
# The HF's GPT-2 implementation uses Conv1D instead of Linear.
# Because of this, we need to transpose the weights.
# Note(zhuohan): the logic below might break quantized models.
for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
if conv1d_weight_name not in name:
continue
if not name.endswith(".weight"):
continue
# TODO: check megatron
loaded_weight = loaded_weight.t()
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
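# Background for the transpose above: HF's GPT-2 Conv1D stores weights as
# (in_features, out_features), while vllm's linear layers expect
# (out_features, in_features), hence `loaded_weight.t()` for c_attn/c_proj/c_fc.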
def llama_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def qwen2_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
if vllm_model.config.tie_word_embeddings and "lm_head.weight" in name:
continue
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_te_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv.layer_norm_weight", "input_layernorm.weight"),
("self_attention.linear_qkv.layer_norm_bias", "input_layernorm.bias"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", "self_attn.o_proj"),
("pre_mlp_layernorm", "post_attention_layernorm"),
("mlp.linear_fc1.layer_norm_weight", "post_attention_layernorm.weight"),
("mlp.linear_fc1.layer_norm_bias", "post_attention_layernorm.bias"),
("mlp.linear_fc1", "mlp.gate_up_proj"),
("mlp.linear_fc2", "mlp.down_proj"),
("decoder.final_layernorm", "model.norm"),
("output_layer", "lm_head"),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith(".bias") and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def llama_megatron_core_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
params_mapping = [
# (megatron core gpt model name, vllm model name)
("embedding.word_embeddings", "model.embed_tokens"),
("self_attention.linear_qkv", "self_attn.qkv_proj"),
("self_attention.linear_proj", "self_attn.o_proj"),
("input_layernorm", "input_layernorm"),
("pre_mlp_layernorm", "post_attention_layernorm"),
("mlp.linear_fc1", "mlp.gate_up_proj"),
("mlp.linear_fc2", "mlp.down_proj"),
("decoder.final_layernorm", "model.norm"),
("output_layer", "lm_head"),
]
# NOTE(shengguangming): the megatron llama may have this prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
name = _replace_name(name, params_mapping)
if name.endswith(".bias") and name not in params_dict:
continue
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
def _replace_name(megatron_name, name_mapping):
for m_name, v_name in name_mapping:
if m_name not in megatron_name:
continue
if "layers" in megatron_name: # deal with decoder layers
megatron_name = megatron_name.replace("decoder", "model")
megatron_name_list = megatron_name.split(".")
if "layer_norm_weight" in megatron_name_list or "layer_norm_bias" in megatron_name_list:
param_name_list = megatron_name_list[:3]
param_name_list.append(v_name)
param_name = ".".join(param_name_list)
else:
param_name_list = megatron_name_list[:3]
weight_or_bias = megatron_name_list[-1]
param_name_list.append(v_name)
param_name_list.append(weight_or_bias)
param_name = ".".join(param_name_list)
return param_name
else:
param_name = megatron_name.replace(m_name, v_name)
return param_name
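# Worked example (illustrative) for _replace_name with the "core" mapping above:
#     _replace_name("decoder.layers.0.self_attention.linear_proj.weight", params_mapping)
#     -> "model.layers.0.self_attn.o_proj.weight"
# Names without "layers" fall through to a plain substring replacement, e.g.
#     "embedding.word_embeddings.weight" -> "model.embed_tokens.weight"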
def mistral_megatron_weight_loader(actor_weights: Dict, vllm_model: nn.Module) -> nn.Module:
# TODO: need to implement a general way to deal with prefix
params_dict = dict(vllm_model.named_parameters())
for name, loaded_weight in actor_weights.items():
if "rotary_emb.inv_freq" in name:
continue
else:
param = params_dict[name]
weight_loader = getattr(param, "weight_loader", default_weight_loader)
weight_loader(param, loaded_weight)
__LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__ = {
ColumnParallelLinear: parallel_weight_loader,
MergedColumnParallelLinear: parallel_weight_loader,
QKVParallelLinear: parallel_weight_loader,
RowParallelLinear: parallel_weight_loader,
VocabParallelEmbedding: parallel_weight_loader,
ParallelLMHead: parallel_weight_loader,
# "ScaledActivation.weight_loader": ScaledActivation, # TODO(shengguangming): latest commit in vllm fix awq for this function and add load_weights
# "default_weight_loader": default_weight_loader
}
__MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__ = {
"GPT2LMHeadModel": gpt2_weight_loader,
"LlamaForCausalLM": llama_megatron_weight_loader, # use te backend for open-source megatron
"LLaMAForCausalLM": llama_megatron_weight_loader,
"MistralForCausalLM": mistral_megatron_weight_loader,
"Qwen2ForCausalLM": qwen2_megatron_weight_loader,
}
# The actor model is passed as a .state_dict()
# Load Megatron-formatted weights
def load_megatron_weights(actor_weights: Dict, vllm_model: nn.Module):
weight_loader = _get_model_weight_loader(vllm_model.__class__.__name__)
weight_loader(actor_weights, vllm_model)
# NOTE(sgm): to reduce peak memory usage, we offload the vllm model to CPU after init,
# so we must move it back to GPU after syncing the model weights in the first iteration.
vllm_model = vllm_model.cuda()
def _get_model_weight_loader(arch: str):
if arch in __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__:
return __MODEL_MEGATRON_WEIGHT_LOADER_REGISTRY__[arch]
raise ValueError(f"Model architectures {arch} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
def update_megatron_weight_loader():
for layer_class, weight_loader in __LAYER_WEIGHT_MEGATRON_LOADER_REGISTRY__.items():
layer_class.weight_loader = weight_loader
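# Illustrative call order (hypothetical driver code): patch the parallel layers
# first so their `weight_loader` accepts pre-sharded Megatron tensors, then
# dispatch the actor's state_dict by vllm model class name:
#
#     update_megatron_weight_loader()
#     load_megatron_weights(actor_state_dict, vllm_model)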
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/model_loader.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/tree/main/vllm/model_executor/models
"""Utilities for selecting and loading models."""
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
from transformers import PreTrainedModel
from vllm.config import CacheConfig, DeviceConfig, LoadConfig, LoRAConfig, ModelConfig, ParallelConfig, SchedulerConfig
from vllm.distributed.communication_op import tensor_model_parallel_all_gather
from vllm.model_executor.model_loader import BaseModelLoader
from vllm.model_executor.model_loader.loader import _initialize_model
from vllm.model_executor.model_loader.utils import set_default_torch_dtype
from .config import LoadConfig, LoadFormat, ModelConfig
from .dtensor_weight_loaders import load_dtensor_weights, update_dtensor_weight_loader
from .hf_weight_loader import update_hf_weight_loader
from .megatron_weight_loaders import load_megatron_weights, update_megatron_weight_loader
def get_model(
actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
load_config: LoadConfig,
device_config: DeviceConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
lora_config: Optional[LoRAConfig],
cache_config: CacheConfig = None,
) -> nn.Module:
loader = get_model_loader(load_config)
if load_config.load_format.startswith("dummy"):
return loader.load_model(
model_config=model_config,
device_config=device_config,
lora_config=lora_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
cache_config=cache_config,
)
else:
return loader.load_model(
actor_model=actor_model,
model_config=model_config,
device_config=device_config,
lora_config=lora_config,
parallel_config=parallel_config,
scheduler_config=scheduler_config,
cache_config=cache_config,
)
def get_model_loader(load_config: LoadConfig) -> BaseModelLoader:
"""Get a model loader based on the load format."""
if isinstance(load_config.load_format, type):
return load_config.load_format(load_config)
if load_config.load_format == LoadFormat.AUTO:
update_megatron_weight_loader()
return MegatronLoader(load_config)
# NOTE(sgm): change the weight_loader function in runtime
if load_config.load_format == LoadFormat.MEGATRON:
update_megatron_weight_loader()
return MegatronLoader(load_config)
if load_config.load_format == LoadFormat.HF:
update_hf_weight_loader()
return HFLoader(load_config)
if load_config.load_format == LoadFormat.DTENSOR:
update_dtensor_weight_loader()
return DTensorLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_HF:
update_hf_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_MEGATRON:
update_megatron_weight_loader()
return DummyModelLoader(load_config)
if load_config.load_format == LoadFormat.DUMMY_DTENSOR:
update_dtensor_weight_loader()
return DummyModelLoader(load_config)
raise ValueError("load format not supported in verl: {}, only support {} and {}".format(
load_config.load_format, LoadFormat.MEGATRON, LoadFormat.HF))
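# Example (illustrative): LoadFormat.MEGATRON returns a MegatronLoader whose
# load_model() consumes a pre-sharded Megatron state_dict, while the DUMMY_*
# formats build an uninitialized model skeleton that is populated later via
# sync_model_weights.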
class DummyModelLoader(BaseModelLoader):
"""Model loader that will set model weights to random values."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def download_model(self, model_config: ModelConfig) -> None:
pass
def load_model(
self,
*,
model_config: ModelConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
cache_config: CacheConfig,
) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, cache_config, scheduler_config)
# NOTE(woosuk): For accurate performance evaluation, we assign
# random values to the weights.
# initialize_dummy_weights(model)
return model.eval()
class MegatronLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def download_model(self, model_config: ModelConfig) -> None:
pass # Nothing to download
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
    # NOTE(shengguangming): loading weights from the actor model is handled in load_model below
    pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(
self,
actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
cache_config: CacheConfig,
) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, cache_config, scheduler_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_megatron_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_megatron_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm): some weights already point to GPU memory, but we still need this.
model = model.cuda()  # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
class HFLoader(BaseModelLoader):
"""Model loader that can load the model weights from model's full params."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def download_model(self, model_config: ModelConfig) -> None:
pass # Nothing to download
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
if isinstance(actor_model, Dict):
return actor_model.items()
elif isinstance(actor_model, nn.Module):
return dict(actor_model.named_parameters()).items()
else:
    raise ValueError(f"actor model should be a Dict or nn.Module, but got {type(actor_model)}")
def load_model(
self,
actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
cache_config: CacheConfig,
) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
# with torch.device(device_config.device):
# NOTE(sgm): init the model in cpu
model = _initialize_model(model_config, self.load_config, lora_config, cache_config, scheduler_config)
model.load_weights(self._get_weights_iterator(actor_model))
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm): some weights already point to GPU memory, but we still need this.
model = model.cuda()  # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
class DTensorLoader(BaseModelLoader):
"""Model loader that can load the model weights from partitioned megatron model."""
def __init__(self, load_config: LoadConfig):
super().__init__(load_config)
if load_config.model_loader_extra_config:
raise ValueError(f"Model loader extra config is not supported for "
f"load format {load_config.load_format}")
def download_model(self, model_config: ModelConfig) -> None:
pass # Nothing to download
def _get_weights_iterator(self, actor_model: Union[PreTrainedModel, Dict]):
    # NOTE(shengguangming): loading weights from the actor model is handled in load_model below
    pass
# if isinstance(actor_model, nn.Module):
# load_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)), vllm_model=model)
# else:
# load_weights(actor_weights=actor_model, vllm_model=model)
# return actor_model
def load_model(
self,
actor_model: Union[PreTrainedModel, Dict],
model_config: ModelConfig,
device_config: DeviceConfig,
lora_config: Optional[LoRAConfig],
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
cache_config: CacheConfig,
) -> nn.Module:
with set_default_torch_dtype(model_config.dtype):
with torch.device(device_config.device):
model = _initialize_model(model_config, self.load_config, lora_config, cache_config, scheduler_config)
# TODO(sgm): This is a hack, we need to register the load_weight() func for each model in vllm
if isinstance(actor_model, nn.Module):
load_dtensor_weights(actor_weights=dict(actor_model.named_parameters(remove_duplicate=False)),
vllm_model=model)
else:
load_dtensor_weights(actor_weights=actor_model, vllm_model=model)
for _, module in model.named_modules():
quant_method = getattr(module, "quant_method", None)
if quant_method is not None:
quant_method.process_weights_after_loading(module)
# FIXME: Remove this after Mixtral is updated
# to use quant_method.
if hasattr(module, "process_weights_after_loading"):
module.process_weights_after_loading()
# NOTE(sgm): some weights already point to GPU memory, but we still need this.
model = model.cuda()  # NOTE (zhangchi.usc1992) We need this for vllm to profile memory usage
return model.eval()
# FIXME(sgm): hack the _get_logits function in vllm v0.4.2
# Since vllm uses ray, the _get_logits result only needs to be returned to the driver node,
# so gather is enough. However, we use SPMD instead of a central scheduler,
# so all_gather is required (aligned with v0.2.6)
def _get_logits(self, hidden_states: torch.Tensor, embedding: torch.Tensor,
embedding_bias: Optional[torch.Tensor]) -> torch.Tensor:
# Get the logits for the next tokens.
logits = torch.matmul(hidden_states, embedding.t())
if embedding_bias is not None:
logits += embedding_bias
logits = tensor_model_parallel_all_gather(logits)
# Remove paddings in vocab (if any).
if logits is not None:
logits = logits[:, :self.org_vocab_size]
return logits
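# Shape sketch (assuming TP=2 with a per-rank vocab shard of V/2): each rank's
# matmul yields logits of shape [num_tokens, V/2]; all_gather concatenates the
# shards into [num_tokens, V] on every rank, and the final slice drops any
# vocab padding beyond org_vocab_size.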
from vllm.model_executor.layers.logits_processor import LogitsProcessor
def logitsprocessor_init(
self,
vocab_size: int,
org_vocab_size: Optional[int] = None,
scale: float = 1.0,
logits_as_input: bool = False,
soft_cap: Optional[float] = None,
) -> None:
"""
Args:
scale: A scaling factor to apply to the logits.
"""
super(LogitsProcessor, self).__init__()
self.scale = scale
self.vocab_size = vocab_size
# Whether the input is logits (default is hidden states).
self.logits_as_input = logits_as_input
# original vocabulary size (without LoRA).
self.org_vocab_size = org_vocab_size or vocab_size
# Soft cap the logits. Used in Gemma 2.
self.soft_cap = soft_cap
# Whether to use gather or all-gather to gather the logits.
self.use_gather = False
LogitsProcessor.__init__ = logitsprocessor_init # use all_gather
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/model_runner.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/model_runner.py
import warnings
from enum import IntEnum
from typing import Dict, Optional, Union
import torch
import torch.nn as nn
import vllm.envs as envs
from vllm.compilation.levels import CompilationLevel
from vllm.config import (
CacheConfig,
DeviceConfig,
LoadConfig,
LoRAConfig,
ModelConfig,
ObservabilityConfig,
ParallelConfig,
PromptAdapterConfig,
SchedulerConfig,
)
from vllm.inputs import INPUT_REGISTRY, InputRegistry
from vllm.logger import init_logger
from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager
from vllm.model_executor.models.interfaces import supports_lora, supports_multimodal
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
from vllm.prompt_adapter.worker_manager import LRUCacheWorkerPromptAdapterManager
from vllm.utils import DeviceMemoryProfiler, is_hip, supports_dynamo
from vllm.worker.model_runner import ModelRunner
from .config import LoadConfig, ModelConfig
from .model_loader import get_model
logger = init_logger(__name__)
# How batches are constructed.
class BatchType(IntEnum):
# Every batch is prefill.
PREFILL = 0
# Every batch is decode.
DECODE = 1
# Batch is a mixture of prefill and decode.
MIXED = 2
class ModelRunner(ModelRunner):
def __init__(
self,
model: Union[nn.Module, Dict], # [verl] model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
cache_config: CacheConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
kv_cache_dtype: Optional[str] = "auto",
is_driver_worker: bool = False,
prompt_adapter_config: Optional[PromptAdapterConfig] = None,
return_hidden_states: bool = False,
observability_config: Optional[ObservabilityConfig] = None,
input_registry: InputRegistry = INPUT_REGISTRY,
mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY,
):
super().__init__(
model_config,
parallel_config,
scheduler_config,
device_config,
cache_config,
load_config,
lora_config,
kv_cache_dtype,
is_driver_worker=True, # a hack
prompt_adapter_config=prompt_adapter_config,
return_hidden_states=return_hidden_states,
observability_config=observability_config,
input_registry=input_registry,
mm_registry=mm_registry,
)
# NOTE(sgm): added for verl
self.model = model  # this will be replaced by get_model() in load_model()
def load_model(self) -> None:
logger.info("Starting to load model %s...", self.model_config.model)
with DeviceMemoryProfiler() as m:
self.model = get_model(
self.model,
model_config=self.model_config,
device_config=self.device_config,
load_config=self.load_config,
lora_config=self.lora_config,
parallel_config=self.parallel_config,
scheduler_config=self.scheduler_config,
cache_config=self.cache_config,
)
self.model_memory_usage = m.consumed_memory
logger.info("Loading model weights took %.4f GB", self.model_memory_usage / float(2**30))
if self.lora_config:
assert supports_lora(self.model), f"{self.model.__class__.__name__} does not support LoRA yet."
if supports_multimodal(self.model):
logger.warning("Regarding multimodal models, vLLM currently "
"only supports adding LoRA to language model.")
# It's necessary to distinguish between the max_position_embeddings
# of VLMs and LLMs.
if hasattr(self.model.config, "max_position_embeddings"):
max_pos_embeddings = self.model.config.max_position_embeddings
else:
max_pos_embeddings = self.model.config.text_config.max_position_embeddings
self.lora_manager = LRUCacheWorkerLoRAManager(
self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens,
self.vocab_size,
self.lora_config,
self.device,
self.model.embedding_modules,
self.model.embedding_padding_modules,
max_position_embeddings=max_pos_embeddings,
)
self.model = self.lora_manager.create_lora_manager(self.model)
if self.prompt_adapter_config:
self.prompt_adapter_manager = LRUCacheWorkerPromptAdapterManager(
self.scheduler_config.max_num_seqs,
self.scheduler_config.max_num_batched_tokens,
self.device,
self.prompt_adapter_config,
)
self.model = self.prompt_adapter_manager.create_prompt_adapter_manager(self.model)
if self.kv_cache_dtype == "fp8" and is_hip():
# Currently only ROCm accepts kv-cache scaling factors
# via quantization_param_path and this will be deprecated
# in the future.
if self.model_config.quantization_param_path is not None:
if callable(getattr(self.model, "load_kv_cache_scales", None)):
warnings.warn(
"Loading kv cache scaling factor from JSON is "
"deprecated and will be removed. Please include "
"kv cache scaling factors in the model checkpoint.",
FutureWarning,
stacklevel=2,
)
self.model.load_kv_cache_scales(self.model_config.quantization_param_path)
logger.info("Loaded KV cache scaling factors from %s", self.model_config.quantization_param_path)
else:
raise RuntimeError(
    "Using FP8 KV cache and scaling factors provided but "
    f"model {self.model.__class__} does not support loading scaling factors.")
else:
logger.warning("Using FP8 KV cache but no scaling factors "
"provided. Defaulting to scaling factors of 1.0. "
"This may lead to less accurate results!")
if envs.VLLM_TORCH_COMPILE_LEVEL == CompilationLevel.DYNAMO_AS_IS and supports_dynamo():
from vllm.plugins import get_torch_compile_backend
backend = get_torch_compile_backend() or "eager"
self.model = torch.compile(self.model, fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE, backend=backend)
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/parallel_state.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Model and data parallel groups."""
import os
from typing import Optional
import torch
import torch.distributed
import vllm.distributed.parallel_state as ps
from vllm.distributed.parallel_state import (
get_pp_group,
get_world_group,
init_distributed_environment,
init_model_parallel_group,
)
from vllm.logger import init_logger
logger = init_logger(__name__)
"""
This version is strongly tied with Megatron to implement HybridEngine and weight sharing between vllm and Megatron.
- We assume the Megatron tp+dp+pp world is already established before calling this function.
"""
# Device mesh for using DTensor
_DEVICE_MESH = None
# Tensor model parallel group that the current rank belongs to.
_TP = None
# Pipeline model parallel group that the current rank belongs to.
_PP = None
# This method is for initializing the ParallelGroup when using HybridEngine
def initialize_parallel_state(
distributed_init_method: str = "env://",
backend: str = "nccl",
tensor_model_parallel_size: int = 1,
num_tp_per_train_tp: int = 1,
pipeline_model_parallel_size: int = 1,
):
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN.
rank = int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
init_distributed_environment(world_size, rank, distributed_init_method, local_rank, backend)
if torch.distributed.get_world_size() > 1:
# NOTE: build a separate inference group with infer tp & micro dp
initialize_model_parallel_for_vllm(
tensor_model_parallel_size=tensor_model_parallel_size,
num_tensor_model_parallel_groups_per_train_tp=num_tp_per_train_tp,
)
else:
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
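# Illustrative launch (hypothetical script name): TORCHRUN populates the env
# vars read above, e.g. for 8 GPUs on a single node:
#
#     torchrun --nproc_per_node=8 rollout.py   # sets RANK, LOCAL_RANK, WORLD_SIZE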
def ensure_model_parallel_initialized(
tensor_model_parallel_size: int,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""Helper to initialize model parallel groups if they are not initialized,
or ensure tensor-parallel and pipeline-parallel sizes are equal to expected
values if the model parallel groups are initialized.
"""
# get the backend of _DEVICE_WORLD_GROUP
backend = backend or torch.distributed.get_backend(get_world_group().device_group)
if not model_parallel_is_initialized():
initialize_model_parallel(tensor_model_parallel_size, pipeline_model_parallel_size, backend)
return
assert get_tensor_model_parallel_world_size() == tensor_model_parallel_size, (
"tensor parallel group already initialized, but of unexpected size: "
f"{get_tensor_model_parallel_world_size()=} vs. "
f"{tensor_model_parallel_size=}")
pp_world_size = get_pp_group().world_size
assert pp_world_size == pipeline_model_parallel_size, (
"pipeline parallel group already initialized, but of unexpected size: "
f"{pp_world_size=} vs. "
f"{pipeline_model_parallel_size=}")
# TODO(sgm): deviates from v0.5.4; no pp support for now
def model_parallel_is_initialized():
"""Check if tensor and pipeline parallel groups are initialized."""
return ps._TP is not None
# and _PIPELINE_MODEL_PARALLEL_GROUP is not None)
def initialize_model_parallel_for_vllm(
tensor_model_parallel_size: int,
num_tensor_model_parallel_groups_per_train_tp: int = 1,
pipeline_model_parallel_size: int = 1,
) -> None:
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
assert isinstance(tensor_model_parallel_size, int)
# assert num_tensor_model_parallel_groups_per_train_tp == 1 and not different_tp_group
# assert num_tensor_model_parallel_groups_per_train_tp > 1 and different_tp_group
# Build the tensor model-parallel groups.
assert ps._TP is None, "tensor model parallel group is already initialized"
global _TP
world_size: int = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
backend = torch.distributed.get_backend()
num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
if num_tensor_model_parallel_groups_per_train_tp == 1:
# if tensor_model_parallel_size == train_tensor_parallel_size:
# using the same tp group as Megatron/vllm
assert _TP is None, "tensor model parallel group is already initialized"
group_ranks = []
for i in range(num_tensor_model_parallel_groups):
ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
group_ranks.append(ranks)
_TP = init_model_parallel_group(
group_ranks=group_ranks,
local_rank=get_world_group().local_rank,
backend=backend,
use_custom_allreduce=False,  # TODO: check why True does not work in the Ray trainer
use_message_queue_broadcaster=True,
)
ps._TP = _TP
# _MICRO_DATA_PARALLEL_GROUP is moved to the hybrid engine
else:
# initialize a micro_dp group and a tp group
# assume training tp=4, infer tp=2, then, weight is partitioned as
# [1], [2], [3], [4] for training and [1,2], [1,2], [3,4], [3,4] for inference
# Build the inference tp groups
# train_tp = train_tensor_parallel_size
train_tp = num_tensor_model_parallel_groups_per_train_tp * tensor_model_parallel_size
# num_tensor_model_parallel_groups_per_train_tp = train_tp // tensor_model_parallel_size
assert _TP is None, "tensor model parallel group is already initialized"
group_ranks = []
for i in range(num_tensor_model_parallel_groups // num_tensor_model_parallel_groups_per_train_tp):
start = train_tp * i
end = train_tp * (i + 1)
for j in range(num_tensor_model_parallel_groups_per_train_tp):
ranks = [rank + j for rank in range(start, end, num_tensor_model_parallel_groups_per_train_tp)]
group_ranks.append(ranks)
_TP = init_model_parallel_group(
group_ranks=group_ranks,
local_rank=get_world_group().local_rank,
backend=backend,
use_custom_allreduce=False,  # TODO: check why True does not work in the Ray trainer
use_message_queue_broadcaster=True,
)
ps._TP = _TP
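# Worked example (illustrative): with tensor_model_parallel_size=2,
# num_tensor_model_parallel_groups_per_train_tp=2 and 8 ranks (train tp=4),
# the inference TP groups built above are [0, 2], [1, 3], [4, 6], [5, 7].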
# TODO: init using device mesh (does not support hybrid engine for now)
# Build the pipeline model-parallel groups.
num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
global _PP
assert _PP is None, "pipeline model parallel group is already initialized"
group_ranks = []
for i in range(num_pipeline_model_parallel_groups):
ranks = list(range(i, world_size, num_pipeline_model_parallel_groups))
group_ranks.append(ranks)
# pipeline parallel does not need custom allreduce
_PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False)
ps._PP = _PP # for verl
def initialize_model_parallel(
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
backend: Optional[str] = None,
) -> None:
"""
NOTE: This method is a hack from the open-sourced version without the
assertion that world_size == tp * pp
Initialize model parallel groups.
Arguments:
tensor_model_parallel_size: number of GPUs used for tensor model
parallelism.
pipeline_model_parallel_size: number of GPUs used for pipeline model
parallelism.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
the model pipeline. The present function will
create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
4 tensor model-parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 pipeline model-parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size: int = torch.distributed.get_world_size()
backend = backend or torch.distributed.get_backend(ps.get_world_group().device_group)
# NOTE(sgm) we don't assert world_size == tp * pp
# DP is not managed by vllm but by the VeRL WorkerGroup
# if (world_size !=
# tensor_model_parallel_size * pipeline_model_parallel_size):
# raise RuntimeError(
# f"world_size ({world_size}) is not equal to "
# f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
# f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
rank = torch.distributed.get_rank()
global _TP
assert _TP is None, "tensor model parallel group is already initialized"
group_ranks = []
for i in range(num_tensor_model_parallel_groups):
ranks = list(range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size))
group_ranks.append(ranks)
# message queue broadcaster is only used in tensor model parallel group
_TP = init_model_parallel_group(
group_ranks,
get_world_group().local_rank,
backend,
use_custom_allreduce=False,  # TODO: check why True does not work in the Ray trainer
use_message_queue_broadcaster=True,
)
ps._TP = _TP
# TODO: init using device mesh (not support hybrid engine now)
# Build the pipeline model-parallel groups.
num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size
global _PP
assert _PP is None, "pipeline model parallel group is already initialized"
group_ranks = []
for i in range(num_pipeline_model_parallel_groups):
ranks = list(range(i, world_size, num_pipeline_model_parallel_groups))
group_ranks.append(ranks)
# pipeline parallel does not need custom allreduce
_PP = init_model_parallel_group(group_ranks, get_world_group().local_rank, backend, use_custom_allreduce=False)
ps._PP = _PP # for verl
"""
Device mesh utilities
"""
def get_device_mesh():
assert _DEVICE_MESH is not None, "device mesh is not initialized"
return _DEVICE_MESH
"""
Tensor model parallel utilities
"""
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert _TP is not None, "tensor model parallel group is not initialized"
return _TP.device_group
def get_tensor_model_parallel_world_size():
"""Return world size for the tensor model parallel group."""
return torch.distributed.get_world_size(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_rank():
"""Return my rank for the tensor model parallel group."""
return torch.distributed.get_rank(group=get_tensor_model_parallel_group())
def get_tensor_model_parallel_src_rank():
"""Calculate the global rank corresponding to the first local rank
in the tensor model parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_tensor_model_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
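# Worked example (illustrative): with a TP world size of 4, global rank 6 maps
# to (6 // 4) * 4 == 4, the first rank of its TP group {4, 5, 6, 7}.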
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/spmd_gpu_executor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/executor/gpu_executor.py
import os
import socket
from typing import Dict, List, Optional, Set, Tuple
import torch
from vllm.config import (
CacheConfig,
DeviceConfig,
LoRAConfig,
ObservabilityConfig,
ParallelConfig,
PromptAdapterConfig,
SchedulerConfig,
SpeculativeConfig,
)
from vllm.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from vllm.logger import init_logger
from vllm.lora.request import LoRARequest
from vllm.model_executor.layers.sampler import SamplerOutput
from vllm.sequence import ExecuteModelRequest
from .config import LoadConfig, ModelConfig
logger = init_logger(__name__)
class SPMDGPUExecutor(ExecutorBase):
"""SPMD-based multi-GPU executor implementations."""
def __init__(
self,
model, # pytorch model itself or its parameter dict
model_config: ModelConfig,
cache_config: CacheConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
load_config: LoadConfig,
lora_config: Optional[LoRAConfig],
speculative_config: Optional[SpeculativeConfig],
prompt_adapter_config: Optional[PromptAdapterConfig],
observability_config: Optional[ObservabilityConfig],
) -> None:
self.model_config = model_config
self.cache_config = cache_config
self.lora_config = lora_config
self.load_config = load_config
self.parallel_config = parallel_config
self.scheduler_config = scheduler_config
self.device_config = device_config
self.speculative_config = speculative_config
self.prompt_adapter_config = prompt_adapter_config
self.observability_config = observability_config
distributed_init_method = initialize_cluster(parallel_config)
self._init_executor(model, distributed_init_method)
# TODO(sgm): verl not support speculative decode now
def _init_executor(self, model, distributed_init_method) -> None:
assert not self.speculative_config, "Speculative decoding not yet supported for multi-GPU backend."
# Create the parallel worker for each GPU.
self._init_workers_sp(model, distributed_init_method)
def _init_workers_sp(self, model, distributed_init_method: str):
# Lazy import the Worker to avoid importing torch.cuda/xformers
# before CUDA_VISIBLE_DEVICES is set in the Worker
from .worker import Worker # pylint: disable=import-outside-toplevel
rank = int(os.getenv("RANK"))
local_rank = int(os.getenv("LOCAL_RANK"))
print(f"local rank {local_rank}")
# see https://github.com/NVIDIA/nccl/issues/1234
os.environ["NCCL_CUMEM_ENABLE"] = "0"
self.worker = Worker(
model,
self.model_config,
self.parallel_config,
self.scheduler_config,
self.device_config,
self.cache_config,
self.load_config,
local_rank,
rank,
distributed_init_method,
lora_config=self.lora_config,
speculative_config=None,
prompt_adapter_config=self.prompt_adapter_config,
is_driver_worker=True,
model_runner_cls=None, # use the default one
)
# NOTE(shengguangming): torch.distributed.init_process_group will be called inside worker.init_device()
self.worker.init_device()
self.worker.load_model()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Determine the number of available KV blocks.
This invokes `determine_num_available_blocks` on each worker and takes
the min of the results, guaranteeing that the selected cache sizes are
compatible with all workers.
Returns:
- tuple[num_gpu_blocks, num_cpu_blocks]
"""
# Get the maximum number of blocks that can be allocated on GPU and CPU.
num_blocks = self.worker.determine_num_available_blocks()
# NOTE(shengguangming): we don't use a shared centralized controller now; each process
# has its own scheduler
num_gpu_blocks = num_blocks[0]
num_cpu_blocks = num_blocks[1]
return num_gpu_blocks, num_cpu_blocks
def initialize_cache(self, num_gpu_blocks: int, num_cpu_blocks: int) -> None:
"""Initialize the KV cache in all workers."""
# NOTE: We log here to avoid multiple logs when number of workers is
# greater than one. We could log in the engine, but not all executors
# have GPUs.
logger.info("# GPU blocks: %d, # CPU blocks: %d", num_gpu_blocks, num_cpu_blocks)
self.cache_config.num_gpu_blocks = num_gpu_blocks
self.cache_config.num_cpu_blocks = num_cpu_blocks
if torch.distributed.get_rank() == 0:
print(
f"before init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB"
)
self.worker.initialize_cache(num_gpu_blocks=num_gpu_blocks, num_cpu_blocks=num_cpu_blocks)
if torch.distributed.get_rank() == 0:
print(
f"after init cache memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB"
)
# NOTE(sgm): This will not profile & capture the model(CUDAGraph) when rebuilding KVCache
def init_cache_engine(self) -> None:
self.worker._init_cache_engine()
def free_cache_engine(self) -> None:
self.worker.free_cache_engine()
def execute_model(self, execute_model_req) -> List[SamplerOutput]:
all_outputs = self.worker.execute_model(execute_model_req=execute_model_req)
# NOTE(sgm):
# Each GPU in vllm under verl has its own spmd_gpu_executor, therefore all GPUs should return the outputs
# In vllm with ray, only the driver worker returns the sampling results.
return all_outputs
def add_lora(self, lora_request: LoRARequest) -> bool:
assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
return self.worker.add_lora(lora_request=lora_request)
def remove_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.remove_lora(lora_id=lora_id)
def list_loras(self) -> Set[int]:
return self.worker.list_loras()
def check_health(self) -> None:
# SPMDExecutor will always be healthy as long as
# it's running.
return
# NOTE(sgm): added for verl to pass the abstract class test; not used
from vllm.prompt_adapter.request import PromptAdapterRequest
def add_prompt_adapter(self, prompt_adapter_request: PromptAdapterRequest) -> bool:
assert prompt_adapter_request.prompt_adapter_id > 0, "prompt_adapter_id must be greater than 0."
return self.worker.add_prompt_adapter(prompt_adapter_request)
def list_prompt_adapters(self) -> Set[int]:
return self.worker.list_prompt_adapters()
def pin_lora(self, lora_id: int) -> bool:
assert lora_id > 0, "lora_id must be greater than 0."
return self.worker.pin_lora(lora_id)
def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
assert prompt_adapter_id > 0, "prompt_adapter_id must be greater than 0."
return self.worker.pin_prompt_adapter(prompt_adapter_id)
def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
assert prompt_adapter_id > 0, "prompt_adapter_id must be greater than 0."
return self.worker.remove_prompt_adapter(prompt_adapter_id)
# NOTE(sgm): add for verl
def offload_model_weights(self) -> None:
self.worker.offload_model_weights()
def sync_model_weights(self, actor_weights: Dict[str, torch.Tensor], load_format: str) -> None:
self.worker.sync_model_weights(actor_weights=actor_weights, load_format=load_format)
def initialize_cluster(
    parallel_config: ParallelConfig,
    engine_use_ray: bool = False,
    ray_address: Optional[str] = None,
) -> str:
    """Initialize the distributed cluster (without Ray in the SPMD setup).
    Args:
        parallel_config: The configurations for parallel execution.
    Returns:
        The `distributed_init_method` address for initializing the
        distributed backend.
    """
# Initialize cluster locally.
port = get_open_port()
# We need to setup the distributed init method to make sure
# the distributed megatron code (e.g., get world size) works correctly.
# distributed_init_method = f"tcp://localhost:{port}"
distributed_init_method = "env://"
return distributed_init_method
def get_open_port():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("", 0))
return s.getsockname()[1]
# TODO(sgm): not implemented async executor yet
class SPMDGPUExecutorAsync(SPMDGPUExecutor, ExecutorAsyncBase):
async def execute_model_async(self, execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
"""Executes one model step on the given sequences."""
raise NotImplementedError
async def check_health_async(self) -> None:
"""Checks if the executor is healthy. If not, it should raise an
exception."""
self.check_health()
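# [Editor's note] A hypothetical helper sketching the intended call order of the
# verl-specific hooks above during hybrid training; `executor`, `new_state_dict`,
# and `load_format` are placeholders, and this is not a verl entry point.
def _example_refresh_rollout_engine(executor: SPMDGPUExecutor, new_state_dict, load_format) -> None:
    """Free the KV cache and offload rollout weights while the trainer updates the
    policy, then sync the updated weights back and rebuild the cache engine."""
    executor.free_cache_engine()
    executor.offload_model_weights()
    # ... the trainer produces new_state_dict elsewhere ...
    executor.sync_model_weights(actor_weights=new_state_dict, load_format=load_format)
    executor.init_cache_engine()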
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/transformers_utils/tokenizer_group/tokenizer_group.py
from typing import Optional
from transformers import PreTrainedTokenizer
from vllm.transformers_utils.tokenizer_group import TokenizerGroup
from vllm.utils import LRUCache
class TokenizerGroup(TokenizerGroup):
"""A group of tokenizers that can be used for LoRA adapters."""
def __init__(self, tokenizer: PreTrainedTokenizer, enable_lora: bool, max_num_seqs: int,
max_input_length: Optional[int]):
self.enable_lora = enable_lora
self.max_input_length = max_input_length
self.tokenizer = tokenizer
self.lora_tokenizers = LRUCache[PreTrainedTokenizer](capacity=max_num_seqs) if enable_lora else None
# FIXME(sgm): for simplicity, we assign the special token here
@property
def pad_token_id(self):
return self.tokenizer.pad_token_id
@property
def eos_token_id(self):
return self.tokenizer.eos_token_id
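# [Editor's note] Minimal usage sketch for the TokenizerGroup above; the model id
# is a placeholder and loading it requires `transformers` (and network or a cache).
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Math-1.5B")  # placeholder model id
    group = TokenizerGroup(tokenizer=tok, enable_lora=False, max_num_seqs=256, max_input_length=None)
    # The two properties added above simply forward to the wrapped tokenizer.
    assert group.pad_token_id == tok.pad_token_id
    assert group.eos_token_id == tok.eos_token_id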
================================================
FILE: verl/third_party/vllm/vllm_v_0_6_3/worker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2023 The vLLM team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/vllm-project/vllm/blob/main/vllm/worker/worker.py
"""A GPU worker class."""
import gc
import os
from typing import Dict, List, Optional, Tuple, Type, Union
import torch
import torch.distributed
import torch.nn as nn
from vllm.config import (
CacheConfig,
DeviceConfig,
LoRAConfig,
ParallelConfig,
PromptAdapterConfig,
SchedulerConfig,
SpeculativeConfig,
)
# TODO(sgm): check why vllm has similar file in vllm.model_executor.parallel_utils.parallel_state
from vllm.distributed import get_tensor_model_parallel_group, init_distributed_environment, set_custom_all_reduce
from vllm.model_executor import set_random_seed
from vllm.model_executor.layers.sampler import SamplerOutput
from vllm.sequence import ExecuteModelRequest, IntermediateTensors
from vllm.worker.cache_engine import CacheEngine
from vllm.worker.embedding_model_runner import EmbeddingModelRunner
from vllm.worker.model_runner import GPUModelRunnerBase
from vllm.worker.model_runner_base import ModelRunnerInputBase
from vllm.worker.worker import Worker, _check_if_gpu_supports_dtype
from vllm.worker.worker_base import WorkerInput
from .config import LoadConfig, LoadFormat, ModelConfig
from .dtensor_weight_loaders import load_dtensor_weights
from .hf_weight_loader import load_hf_weights
from .megatron_weight_loaders import load_megatron_weights
from .model_runner import ModelRunner
from .parallel_state import ensure_model_parallel_initialized
class Worker(Worker):
"""A worker class that executes (a partition of) the model on a GPU.
Each worker is associated with a single GPU. The worker is responsible for
maintaining the KV cache and executing the model on the GPU. In case of
distributed inference, each worker is assigned a partition of the model.
"""
def __init__(
self,
model: Union[nn.Module, Dict], # model itself or its parameter dict
model_config: ModelConfig,
parallel_config: ParallelConfig,
scheduler_config: SchedulerConfig,
device_config: DeviceConfig,
cache_config: CacheConfig,
load_config: LoadConfig,
local_rank: int,
rank: int,
distributed_init_method: str,
lora_config: Optional[LoRAConfig] = None,
speculative_config: Optional[SpeculativeConfig] = None,
prompt_adapter_config: Optional[PromptAdapterConfig] = None,
is_driver_worker: bool = False,
model_runner_cls: Optional[Type[GPUModelRunnerBase]] = None,
) -> None:
# self.model = model # will be replaced in the init_model
self.model_config = model_config
self.parallel_config = parallel_config
self.parallel_config.rank = rank
self.scheduler_config = scheduler_config
self.device_config = device_config
self.cache_config = cache_config
self.local_rank = local_rank
self.rank = rank
self.distributed_init_method = distributed_init_method
self.lora_config = lora_config
self.load_config = load_config
self.prompt_adapter_config = prompt_adapter_config
self.is_driver_worker = is_driver_worker # TODO: we don't need driver
# if parallel_config and is_driver_worker:
# assert rank % parallel_config.tensor_parallel_size == 0, \
# "Driver worker should be rank 0 of tensor parallel group."
if self.model_config.trust_remote_code:
# note: lazy import to avoid importing torch before initializing
from vllm.utils import init_cached_hf_modules
init_cached_hf_modules()
# Return hidden states from target model if the draft model is an
# mlp_speculator
speculative_args = (
{} if speculative_config is None or (speculative_config.draft_model_config.model == model_config.model) or
(speculative_config.draft_model_config.hf_config.model_type not in ["medusa", "mlp_speculator"]) else {
"return_hidden_states": True
})
# TODO(sgm): set correct model runner class
ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner
if model_runner_cls is not None:
ModelRunnerClass = model_runner_cls
elif self.model_config.embedding_mode:
ModelRunnerClass = EmbeddingModelRunner
self.model_runner: GPUModelRunnerBase = ModelRunnerClass(
model, # [VERL]: add for verl
model_config,
parallel_config,
scheduler_config,
device_config,
cache_config,
load_config=load_config,
lora_config=self.lora_config,
kv_cache_dtype=self.cache_config.cache_dtype,
is_driver_worker=is_driver_worker,
prompt_adapter_config=prompt_adapter_config,
**speculative_args,
)
# Uninitialized cache engine. Will be initialized by
# initialize_cache.
self.cache_engine: List[CacheEngine] = None
# Initialize gpu_cache as embedding models don't initialize kv_caches
self.gpu_cache: Optional[List[List[torch.Tensor]]] = None
# NOTE(sgm): [VERL] For offloading inference engine params
self.cpu_model = None
def init_device(self) -> None:
if self.device_config.device.type == "cuda":
# torch.distributed.all_reduce does not free the input tensor until
# the synchronization point. This causes the memory usage to grow
# as the number of all_reduce calls increases. This env var disables
# this behavior.
# Related issue:
# https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
# NOTE(sgm): Modify for verl, Env vars will be set by TORCHRUN.
self.rank = self.rank if self.rank is not None else int(os.getenv("RANK", "-1"))
local_rank = int(os.getenv("LOCAL_RANK", "0"))
self.device = torch.device(f"cuda:{local_rank}")
if self.rank < 0:
raise ValueError("Invalid or unspecified rank.")
torch.cuda.set_device(self.device)
# Use the world_size set by TORCHRUN
world_size = int(os.getenv("WORLD_SIZE", "-1"))
assert world_size != -1, "The world_size is set to -1, not initialized by TORCHRUN"
self.parallel_config.world_size = world_size
_check_if_gpu_supports_dtype(self.model_config.dtype)
torch.cuda.empty_cache()
self.init_gpu_memory = torch.cuda.mem_get_info()[0]
else:
raise RuntimeError(f"Not support device type: {self.device_config.device}")
# Initialize the distributed environment.
init_worker_distributed_environment(self.parallel_config, self.rank, self.distributed_init_method,
self.local_rank)
# Set random seed.
set_random_seed(self.model_config.seed)
# self.model = get_model(actor_model=self.model, model_config=self.model_config)
@torch.inference_mode()
def determine_num_available_blocks(self) -> Tuple[int, int]:
"""Profiles the peak memory usage of the model to determine how many
KV blocks may be allocated without OOMs.
The engine will first conduct a profiling of the existing memory usage.
Then, it calculates the maximum possible number of GPU and CPU blocks
that can be allocated with the remaining free memory.
.. tip::
You may limit the usage of GPU memory
by adjusting the `gpu_memory_utilization` parameter.
"""
# Profile the memory usage of the model and get the maximum number of
# cache blocks that can be allocated with the remaining free memory.
torch.cuda.empty_cache()
# torch.cuda.reset_peak_memory_stats()
# Execute a forward pass with dummy inputs to profile the memory usage
# of the model.
self.model_runner.profile_run()
# Calculate the number of blocks that can be allocated with the
# profiled peak memory.
torch.cuda.synchronize()
free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
peak_memory = total_gpu_memory - free_gpu_memory
assert peak_memory > 0, ("Error in memory profiling. This happens when the GPU memory was "
"not properly cleaned up before initializing the vLLM instance.")
cache_block_size = self.get_cache_block_size_bytes()
# NOTE(sgm) [VERL] use the remaining memory
num_gpu_blocks = int((free_gpu_memory * self.cache_config.gpu_memory_utilization) // cache_block_size)
# num_gpu_blocks = int((total_gpu_memory * self.cache_config.gpu_memory_utilization - peak_memory) // cache_block_size)
num_cpu_blocks = int(self.cache_config.swap_space_bytes // cache_block_size)
num_gpu_blocks = max(num_gpu_blocks, 0)
num_cpu_blocks = max(num_cpu_blocks, 0)
if self.model_runner.lora_manager:
self.model_runner.remove_all_loras()
# NOTE(sgm): Added for [VERL]: synchronize the number of blocks across all ranks
num_gpu_blocks = torch.tensor([num_gpu_blocks], device="cuda")
num_cpu_blocks = torch.tensor([num_cpu_blocks], device="cuda")
torch.distributed.all_reduce(num_gpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group().device_group)
torch.distributed.all_reduce(num_cpu_blocks,
op=torch.distributed.ReduceOp.MIN,
group=get_tensor_model_parallel_group().device_group)
num_gpu_blocks = num_gpu_blocks.item()
num_cpu_blocks = num_cpu_blocks.item()
gc.collect()
torch.cuda.empty_cache()
return num_gpu_blocks, num_cpu_blocks
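# [Editor's note] Worked example of the block math above, with made-up numbers:
# with 40 GiB free after the profiling pass, gpu_memory_utilization = 0.5 and a
# (model-dependent) cache_block_size of 2 MiB,
#   num_gpu_blocks = int(40 GiB * 0.5 // 2 MiB) = 10240
# and with swap_space_bytes = 4 GiB,
#   num_cpu_blocks = int(4 GiB // 2 MiB) = 2048.
# The all_reduce(MIN) above then makes every rank adopt the smallest counts, so
# all TP ranks allocate identically sized KV caches.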
def _init_cache_engine(self):
if self.cache_engine is None and self.gpu_cache is None:
super()._init_cache_engine()
def free_cache_engine(self):
# ensure `enforce_eager=True`
self.cache_engine = None
self.gpu_cache = None
# NOTE(sgm): [VERL]: adapt from _execute_model_spmd()
def execute_model(self,
execute_model_req: ExecuteModelRequest,
intermediate_tensors: Optional[IntermediateTensors] = None) -> Optional[List[SamplerOutput]]:
"""
Execute model in Single Program Multiple Data (SPMD) fashion.
All workers take the same request, prepare the input and
execute the model.
"""
assert execute_model_req is not None, ("_execute_model_spmd() requires each worker to take in an "
"ExecuteModelRequest")
worker_input: WorkerInput = self.prepare_worker_input(execute_model_req=execute_model_req)
model_input: ModelRunnerInputBase = self.model_runner.prepare_model_input(
execute_model_req.seq_group_metadata_list)
# verl.worker.workerbase.WorkerBase
# swap cache
super().execute_worker(worker_input)
# If there is no input, we don't need to execute the model.
if worker_input.num_seq_groups == 0:
return []
return self.model_runner.execute_model(
model_input,
self.kv_cache[worker_input.virtual_engine] if self.kv_cache is not None else None,
intermediate_tensors,
)
# assume the input is .state_dict()
def sync_model_weights(self, actor_weights: Dict, load_format: str):
if load_format in [LoadFormat.MEGATRON, LoadFormat.AUTO]:
load_megatron_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.HF:
# full model state dict without sharding
load_hf_weights(actor_weights, self.model_runner.model)
elif load_format == LoadFormat.DTENSOR:
load_dtensor_weights(actor_weights, self.model_runner.model)
def offload_model_weights(self) -> None:
if self.cpu_model is None:
self.cpu_model = {}
for name, params in self.model_runner.model.named_parameters():
self.cpu_model[name] = torch.empty_like(params, device="cpu")
params.data = self.cpu_model[name]
else:
for name, params in self.model_runner.model.named_parameters():
params.data = self.cpu_model[name]
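# [Editor's note] The offload trick above in isolation: re-pointing a parameter's
# `.data` at a CPU buffer drops the last reference to its GPU storage, which the
# allocator can then reclaim. The CPU buffers are deliberately left uninitialized,
# since sync_model_weights() reloads real weights from the trainer before the next
# rollout. Minimal standalone sketch (requires a CUDA device; not a verl API):
#
#   model = torch.nn.Linear(4, 4).cuda()
#   cpu_buffers = {n: torch.empty_like(p, device="cpu") for n, p in model.named_parameters()}
#   for n, p in model.named_parameters():
#       p.data = cpu_buffers[n]   # GPU storage becomes unreferenced
#   torch.cuda.empty_cache()      # return freed blocks to the driver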
def init_worker_distributed_environment(
parallel_config: ParallelConfig,
rank: int,
distributed_init_method: Optional[str] = "env://",
local_rank: int = -1,
) -> None:
"""Initialize the distributed environment."""
set_custom_all_reduce(not parallel_config.disable_custom_all_reduce)
# NOTE(sgm): using tcp://localhost:xxxx will hang in the HF setting without Megatron
init_distributed_environment(parallel_config.world_size, rank, distributed_init_method, local_rank)
ensure_model_parallel_initialized(
tensor_model_parallel_size=parallel_config.tensor_parallel_size,
pipeline_model_parallel_size=parallel_config.pipeline_parallel_size,
)
# TODO(sgm): check whether need this
# if pynccl_utils.is_initialized():
# pynccl_world_size = pynccl_utils.get_world_size()
# if pynccl_world_size != parallel_config.world_size:
# raise RuntimeError(
# "pynccl is already initialized but the pynccl world "
# "size does not match parallel_config.world_size "
# f"({pynccl_world_size} vs. {parallel_config.world_size}).")
# elif parallel_config.world_size > 1:
# # NOTE(woosuk): We don't initialize pynccl process group when world size
# # is 1.
# # NOTE(kaichao): By default, pynccl is initialized for tp group.
# pynccl_utils.init_process_group(
# group=get_tensor_model_parallel_cpu_group())
# # Initialize a custom fast all-reduce implementation.
# if not parallel_config.disable_custom_all_reduce:
# init_custom_ar()
# A small all_reduce for warmup.
torch.distributed.all_reduce(torch.zeros(1).cuda())
# if pynccl_utils.is_initialized():
# pynccl_utils.all_reduce(torch.zeros(1).cuda())
================================================
FILE: verl/trainer/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/trainer/config/evaluation.yaml
================================================
data:
path: /tmp/math_Qwen2-7B-Instruct.parquet
prompt_key: prompt
response_key: responses
data_source_key: data_source
reward_model_key: reward_model
================================================
FILE: verl/trainer/config/generation.yaml
================================================
trainer:
nnodes: 1
n_gpus_per_node: 8
data:
path: ~/data/rlhf/math/test.parquet
prompt_key: prompt
n_samples: 5
output_path: /opt/tiger/math_Qwen2-7B-Instruct.parquet
batch_size: 128
model:
path: ~/models/Qwen2-7B-Instruct
external_lib: null
rollout:
name: vllm
temperature: 1.0
top_k: 50 # 0 for hf rollout, -1 for vllm rollout
top_p: 0.7
prompt_length: 1536
response_length: 512
# for vllm rollout
dtype: bfloat16 # should align with FSDP
gpu_memory_utilization: 0.5
ignore_eos: False
enforce_eager: True
free_cache_engine: True
load_format: dummy_dtensor
tensor_model_parallel_size: 1
max_num_batched_tokens: 8192
max_model_len: null
max_num_seqs: 1024
log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
log_prob_micro_batch_size_per_gpu: 8
# for fire vllm rollout
use_fire_sampling: False # enable FIRE https://arxiv.org/abs/2410.21236
# for hf rollout
do_sample: True
disable_log_stats: True
enable_chunked_prefill: True
n: 1
actor:
strategy: fsdp # This is for backward-compatibility
ppo_mini_batch_size: 256
ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
ppo_micro_batch_size_per_gpu: null
use_dynamic_bsz: False
ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length}
grad_clip: 1.0
clip_ratio: 0.2
entropy_coeff: 0.001
use_kl_loss: False # True for GRPO
kl_loss_coef: 0.001 # for grpo
kl_loss_type: low_var_kl # for grpo
ppo_epochs: 1
shuffle: False
ulysses_sequence_parallel_size: 1 # sp size
optim:
lr: 1e-6
lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
min_lr_ratio: null # only useful for warmup with cosine
warmup_style: constant # select from constant/cosine
total_training_steps: -1 # must be overridden by the program
fsdp_config:
wrap_policy:
min_num_params: 0
param_offload: False
optimizer_offload: False
fsdp_size: -1
================================================
FILE: verl/trainer/config/ppo_megatron_trainer.yaml
================================================
data:
tokenizer: null
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
prompt_key: prompt
max_prompt_length: 512
max_response_length: 512
train_batch_size: 1024
val_batch_size: null # DEPRECATED: validation datasets are sent to the inference engines as a whole batch; the engines schedule memory themselves
return_raw_input_ids: False # set this to True when the policy and RM tokenizers differ
return_raw_chat: False
shuffle: True
actor_rollout_ref:
hybrid_engine: True
model:
path: ~/models/deepseek-llm-7b-chat
external_lib: null
override_config: {}
enable_gradient_checkpointing: False
actor:
strategy: megatron # This is for backward-compatibility
ppo_mini_batch_size: 256
ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
ppo_micro_batch_size_per_gpu: null
use_dynamic_bsz: False
clip_ratio: 0.2
entropy_coeff: 0.001
ppo_epochs: 1
shuffle: True
optim:
lr: 1e-6
clip_grad: 1.0
lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
min_lr_ratio: null # only useful for warmup with cosine
warmup_style: constant # select from constant/cosine
total_training_steps: -1 # must be overridden by the program
megatron:
tensor_model_parallel_size: 4
pipeline_model_parallel_size: 1
num_layers_per_virtual_pipeline_stage: null # vpp will hang; needs debugging
sequence_parallel: True
seed: 1
load_weight: True
ref:
megatron:
tensor_model_parallel_size: 4
pipeline_model_parallel_size: 1
num_layers_per_virtual_pipeline_stage: null # vpp will hang; needs debugging
sequence_parallel: True
seed: 1
load_weight: True
param_offload: False
log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
log_prob_micro_batch_size_per_gpu: null
rollout:
name: vllm
temperature: 1.0
top_k: -1 # 0 for hf rollout, -1 for vllm rollout
top_p: 1
prompt_length: ${data.max_prompt_length} # for xperf_gpt
response_length: ${data.max_response_length}
# for vllm rollout
dtype: bfloat16 # should align with FSDP
gpu_memory_utilization: 0.5
ignore_eos: False
enforce_eager: True
free_cache_engine: True
load_format: dummy_megatron
tensor_model_parallel_size: 2
max_num_batched_tokens: 8192
max_model_len: null
max_num_seqs: 1024
log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
log_prob_micro_batch_size_per_gpu: null
disable_log_stats: True
enable_chunked_prefill: False # could get higher throughput
# for hf rollout
do_sample: True
layer_name_map:
qkv_layer_name: qkv
gate_proj_layer_name: gate_up
# number of responses (i.e. num sample times)
n: 1
critic:
strategy: megatron
optim:
lr: 1e-5
clip_grad: 1.0
lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
min_lr_ratio: null # only useful for warmup with cosine
warmup_style: constant # select from constant/cosine
total_training_steps: -1 # must be overridden by the program
model:
path: ~/models/deepseek-llm-7b-chat
tokenizer_path: ${actor_rollout_ref.model.path}
override_config: {}
external_lib: ${actor_rollout_ref.model.external_lib}
enable_gradient_checkpointing: False
megatron:
tensor_model_parallel_size: 4
pipeline_model_parallel_size: 1
num_layers_per_virtual_pipeline_stage: null # vpp will hang; needs debugging
sequence_parallel: True
seed: 1
load_weight: True
ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size}
ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
ppo_micro_batch_size_per_gpu: null
use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs}
shuffle: ${actor_rollout_ref.actor.shuffle}
cliprange_value: 0.5
kl_ctrl:
type: fixed
kl_coef: 0.001
reward_model:
enable: False
strategy: megatron
megatron:
tensor_model_parallel_size: 4
pipeline_model_parallel_size: 1
num_layers_per_virtual_pipeline_stage: null # vpp will hang; needs debugging
sequence_parallel: True
seed: 1
model:
input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical
path: ~/models/FsfairX-LLaMA3-RM-v0.1
external_lib: ${actor_rollout_ref.model.external_lib}
load_weight: True
param_offload: False
micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu
micro_batch_size_per_gpu: null
use_dynamic_bsz: ${critic.use_dynamic_bsz}
max_length: null
algorithm:
gamma: 1.0
lam: 1.0
adv_estimator: gae
kl_penalty: kl # how to estimate kl divergence
kl_ctrl:
type: fixed
kl_coef: 0.001
trainer:
total_epochs: 30
total_training_steps: null
project_name: verl_examples
experiment_name: gsm8k
logger: ['console', 'wandb']
val_generations_to_log_to_wandb: 0
nnodes: 1
n_gpus_per_node: 8
save_freq: -1
# auto: find the last ckpt to resume. If can't find, start from scratch
resume_mode: disable # or auto, or resume_path if resume_from_path is set
resume_from_path: False
test_freq: 2
critic_warmup: 0
default_hdfs_dir: null
default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name}
================================================
FILE: verl/trainer/config/ppo_trainer.yaml
================================================
data:
tokenizer: null
train_files: ~/data/rlhf/gsm8k/train.parquet
val_files: ~/data/rlhf/gsm8k/test.parquet
prompt_key: prompt
template_type: default
max_prompt_length: 512
val_max_prompt_length: 1024
max_response_length: 512
train_batch_size: 1024
val_batch_size: null # DEPRECATED: validation datasets are sent to the inference engines as a whole batch; the engines schedule memory themselves
return_raw_input_ids: False # set this to True when the policy and RM tokenizers differ
return_raw_chat: False
shuffle: True
image_key: images
reward_type: default
execution_error_penalty: False
actor_rollout_ref:
hybrid_engine: True
model:
path: ~/models/deepseek-llm-7b-chat
external_lib: null
override_config: { }
enable_gradient_checkpointing: True
use_remove_padding: False
actor:
strategy: fsdp # This is for backward-compatibility
ppo_mini_batch_size: 256
ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
ppo_micro_batch_size_per_gpu: null
use_dynamic_bsz: False
ppo_max_token_len_per_gpu: 16384 # n * ${data.max_prompt_length} + ${data.max_response_length}
grad_clip: 1.0
clip_ratio: 0.2
entropy_coeff: 0.001
use_kl_loss: False # True for GRPO
kl_loss_coef: 0.001 # for grpo
kl_loss_type: low_var_kl # for grpo
ppo_epochs: 1
shuffle: False
ulysses_sequence_parallel_size: 1 # sp size
optim:
lr: 1e-6
lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
min_lr_ratio: null # only useful for warmup with cosine
warmup_style: constant # select from constant/cosine
total_training_steps: -1 # must be overridden by the program
fsdp_config:
wrap_policy:
# transformer_layer_cls_to_wrap: None
min_num_params: 0
param_offload: False
optimizer_offload: False
fsdp_size: -1
ref:
fsdp_config:
param_offload: False
wrap_policy:
# transformer_layer_cls_to_wrap: None
min_num_params: 0
log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
log_prob_micro_batch_size_per_gpu: null
log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
ulysses_sequence_parallel_size: ${actor_rollout_ref.actor.ulysses_sequence_parallel_size} # sp size
rollout:
num_llm_calls_available: 3
name: vllm
temperature: 1.0
top_k: -1 # 0 for hf rollout, -1 for vllm rollout
top_p: 1
use_fire_sampling: False # https://arxiv.org/abs/2410.21236
prompt_length: ${data.max_prompt_length} # not used for open source
response_length: ${data.max_response_length}
# for vllm rollout
dtype: bfloat16 # should align with FSDP
gpu_memory_utilization: 0.5
ignore_eos: False
enforce_eager: True
free_cache_engine: True
load_format: dummy_dtensor
tensor_model_parallel_size: 2
max_num_batched_tokens: 8192
max_model_len: null
max_num_seqs: 1024
log_prob_micro_batch_size: null # will be deprecated, use log_prob_micro_batch_size_per_gpu
log_prob_micro_batch_size_per_gpu: null
log_prob_use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
log_prob_max_token_len_per_gpu: ${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}
disable_log_stats: True
enable_chunked_prefill: True # may get higher throughput when set to True; when enabled, please increase max_num_batched_tokens or decrease max_model_len
# for hf rollout
do_sample: True
# number of responses (i.e. num sample times)
n: 1 # > 1 for grpo
critic:
strategy: fsdp
optim:
lr: 1e-5
lr_warmup_steps_ratio: 0. # the total steps will be injected during runtime
min_lr_ratio: null # only useful for warmup with cosine
warmup_style: constant # select from constant/cosine
total_training_steps: -1 # must be overridden by the program
model:
path: ~/models/deepseek-llm-7b-chat
tokenizer_path: ${actor_rollout_ref.model.path}
override_config: { }
external_lib: ${actor_rollout_ref.model.external_lib}
enable_gradient_checkpointing: True
use_remove_padding: False
fsdp_config:
param_offload: False
optimizer_offload: False
wrap_policy:
# transformer_layer_cls_to_wrap: None
min_num_params: 0
fsdp_size: -1
ppo_mini_batch_size: ${actor_rollout_ref.actor.ppo_mini_batch_size}
ppo_micro_batch_size: null # will be deprecated, use ppo_micro_batch_size_per_gpu
ppo_micro_batch_size_per_gpu: null
forward_micro_batch_size: ${critic.ppo_micro_batch_size}
forward_micro_batch_size_per_gpu: ${critic.ppo_micro_batch_size_per_gpu}
use_dynamic_bsz: ${actor_rollout_ref.actor.use_dynamic_bsz}
ppo_max_token_len_per_gpu: 32768 # (${actor_rollout_ref.actor.ppo_max_token_len_per_gpu}) * 2
forward_max_token_len_per_gpu: ${critic.ppo_max_token_len_per_gpu}
ulysses_sequence_parallel_size: 1 # sp size
ppo_epochs: ${actor_rollout_ref.actor.ppo_epochs}
shuffle: ${actor_rollout_ref.actor.shuffle}
grad_clip: 1.0
cliprange_value: 0.5
reward_model:
enable: False
strategy: fsdp
model:
input_tokenizer: ${actor_rollout_ref.model.path} # set this to null if the chat template is identical
path: ~/models/FsfairX-LLaMA3-RM-v0.1
external_lib: ${actor_rollout_ref.model.external_lib}
use_remove_padding: False
fsdp_config:
min_num_params: 0
param_offload: False
fsdp_size: -1
micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu
micro_batch_size_per_gpu: null # set a number
max_length: null
ulysses_sequence_parallel_size: 1 # sp size
use_dynamic_bsz: ${critic.use_dynamic_bsz}
forward_max_token_len_per_gpu: ${critic.forward_max_token_len_per_gpu}
reward_manager: naive
algorithm:
gamma: 1.0
lam: 1.0
adv_estimator: gae
kl_penalty: kl # how to estimate kl divergence
kl_ctrl:
type: fixed
kl_coef: 0.001
trainer:
total_epochs: 30
total_training_steps: null
project_name: verl_examples
experiment_name: gsm8k
logger: [ 'console', 'wandb' ]
val_generations_to_log_to_wandb: 0
nnodes: 1
n_gpus_per_node: 8
save_freq: -1
samples_save_path: null
# auto: find the last ckpt to resume. If can't find, start from scratch
resume_mode: auto # or disable, or resume_path if resume_from_path is set
resume_from_path: False
test_freq: -1
critic_warmup: 0
default_hdfs_dir: null
remove_previous_ckpt_in_save: False
del_local_ckpt_after_load: False
default_local_dir: checkpoints/${trainer.project_name}/${trainer.experiment_name}
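# [Editor's note] Any key above can be overridden at launch time with Hydra's
# dotted syntax (paths and model id below are placeholders), e.g.:
#   python3 -m verl.trainer.main_ppo \
#     data.train_files=data/torl_data/train.parquet \
#     actor_rollout_ref.model.path=Qwen/Qwen2.5-Math-1.5B \
#     trainer.n_gpus_per_node=8 trainer.nnodes=1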
================================================
FILE: verl/trainer/config/sft_trainer.yaml
================================================
data:
train_batch_size: 256
micro_batch_size: null # will be deprecated, use micro_batch_size_per_gpu
micro_batch_size_per_gpu: 4 # this is also val batch size
train_files: ~/data/gsm8k/train.parquet
val_files: ~/data/gsm8k/test.parquet
prompt_key: question
response_key: answer
max_length: 1024
truncation: error
balance_dp_token: False
chat_template: null
model:
partial_pretrain: ~/models/gemma-1.1-7b-it
fsdp_config:
wrap_policy:
min_num_params: 0
cpu_offload: False
offload_params: False
external_lib: null
enable_gradient_checkpointing: False
trust_remote_code: False
lora_rank: 0 # Set to positive value to enable LoRA (e.g., 32)
lora_alpha: 16 # LoRA scaling factor
target_modules: all-linear # Target modules for LoRA adaptation
use_liger: False
optim:
lr: 1e-5
betas: [0.9, 0.95]
weight_decay: 0.01
warmup_steps_ratio: 0.1
clip_grad: 1.0
ulysses_sequence_parallel_size: 1
use_remove_padding: False
trainer:
default_local_dir: /tmp/sft_model
default_hdfs_dir: hdfs://tmp/experiments/gsm8k/gemma-1.1-7b-it/ # change the hdfs path here
resume_path: null
project_name: gsm8k-sft
experiment_name: test
total_epochs: 4
total_training_steps: null
logger: ['console']
seed: 1
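# [Editor's note] Example launch (placeholder values): the SFT trainer expects
# torchrun-style env vars (RANK/WORLD_SIZE) for its device mesh, and keys here
# can be overridden with Hydra's dotted syntax, e.g.:
#   torchrun --standalone --nproc_per_node=8 -m verl.trainer.fsdp_sft_trainer \
#     data.train_files=$HOME/data/gsm8k/train.parquet \
#     model.partial_pretrain=Qwen/Qwen2.5-Math-1.5B model.lora_rank=32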
================================================
FILE: verl/trainer/fsdp_sft_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A lightweight one-file FSDP SFT Trainer
TODO(zhangchi.usc1992)
- Add calculation of mfu
- Add validation
"""
import os
os.environ['NCCL_DEBUG'] = 'WARN'
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
import logging
import re
from contextlib import nullcontext
import torch
import torch.distributed
from torch import nn, optim
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision, ShardingStrategy, CPUOffload
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForCausalLM, PreTrainedModel, AutoConfig
from verl.utils.torch_functional import get_cosine_schedule_with_warmup
from tensordict import TensorDict
from torch.utils.data import DataLoader, DistributedSampler
from flash_attn.bert_padding import pad_input, unpad_input, rearrange, index_first_axis
from verl.utils.fsdp_utils import get_fsdp_wrap_policy, init_fn, get_init_weight_context_manager
from verl.utils.dataset import SFTDataset
from verl.utils.fs import copy_to_local
from verl.utils.tracking import Tracking
from verl.utils.ulysses import get_ulysses_sequence_parallel_world_size, set_ulysses_sequence_parallel_group
from torch.distributed.device_mesh import DeviceMesh
import verl.utils.hdfs_io as hdfs_io
from verl.utils.debug import log_gpu_memory_usage
from peft import LoraConfig, TaskType, get_peft_model
from verl.workers.sharding_manager import FSDPUlyssesShardingManager
from verl.utils.ulysses import ulysses_pad_and_slice_inputs, gather_outpus_and_unpad
from verl import DataProto
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv('VERL_SFT_LOGGING_LEVEL', 'WARN'))
def extract_step(path):
match = re.search(r'global_step_(\d+)', path)
if match:
return int(match.group(1))
return None
def convert_to_regular_types(obj):
"""Convert Hydra configs and other special types to regular Python types."""
from omegaconf import ListConfig, DictConfig
if isinstance(obj, (ListConfig, DictConfig)):
return {k: convert_to_regular_types(v) for k, v in obj.items()} if isinstance(obj, DictConfig) else list(obj)
elif isinstance(obj, (list, tuple)):
return [convert_to_regular_types(x) for x in obj]
elif isinstance(obj, dict):
return {k: convert_to_regular_types(v) for k, v in obj.items()}
return obj
class FSDPSFTTrainer(object):
def __init__(self, config, device_mesh: DeviceMesh, ulysses_device_mesh: DeviceMesh):
self.config = config
self.device_mesh = device_mesh
self.ulysses_device_mesh = ulysses_device_mesh
self.sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
# build tokenizer first
local_model_path = copy_to_local(src=self.config.model.partial_pretrain, verbose=True)
from verl.utils import hf_tokenizer
self.tokenizer = hf_tokenizer(local_model_path, trust_remote_code=self.config.model.trust_remote_code)
if self.config.data.chat_template is not None:
raise ValueError('Applying a chat template from the config is not supported yet.')
# normalize dp size
self._normalize_config_bsz()
# Set sequence parallel size
self.config.ulysses_sequence_parallel_size = getattr(self.config, 'ulysses_sequence_parallel_size', 1)
self.use_remove_padding = getattr(self.config, 'use_remove_padding', False)
if self.device_mesh.get_rank() == 0:
print(f'Using sequence parallel size: {self.config.ulysses_sequence_parallel_size}')
print(f'Using remove padding: {self.use_remove_padding}')
self._build_dataloader()
# build model
self._build_model_optimizer()
# TODO: add checkpoint manager
if self.device_mesh.get_rank() == 0:
print(self.config)
def _normalize_config_bsz(self):
dp_size = self.device_mesh.size(0) if not self.ulysses_device_mesh else self.ulysses_device_mesh.size(0)
if self.device_mesh.get_rank() == 0:
print(f'Normalize batch size by dp {dp_size}')
assert self.config.data.train_batch_size % dp_size == 0, f"Global batch size {self.config.data.train_batch_size} is not divisible by dp size {dp_size}"
self.config.data.train_batch_size //= dp_size
assert self.config.data.train_batch_size % self.config.data.micro_batch_size_per_gpu == 0
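# [Editor's note] Worked example of the normalization above: with
# train_batch_size=256 and dp_size=8, each rank trains on 256 // 8 = 32 samples
# per step, later split into micro-batches of micro_batch_size_per_gpu (e.g. 4)
# for gradient accumulation (32 / 4 = 8 accumulation steps).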
def _build_dataloader(self):
config = self.config
# build dataset
self.train_dataset = SFTDataset(parquet_files=config.data.train_files,
tokenizer=self.tokenizer,
prompt_key=config.data.prompt_key,
prompt_dict_keys=config.data.get('prompt_dict_keys', None),
response_key=config.data.response_key,
response_dict_keys=config.data.get('response_dict_keys', None),
max_length=config.data.max_length,
truncation=config.data.truncation)
self.val_dataset = SFTDataset(parquet_files=config.data.val_files,
tokenizer=self.tokenizer,
prompt_key=config.data.prompt_key,
prompt_dict_keys=config.data.get('prompt_dict_keys', None),
response_key=config.data.response_key,
response_dict_keys=config.data.get('response_dict_keys', None),
max_length=config.data.max_length,
truncation=config.data.truncation)
# build dataloader
# Use data parallel rank and size instead of global rank and world size
# If doing SP, we need to use the local rank and size
if self.config.ulysses_sequence_parallel_size > 1:
rank = self.ulysses_device_mesh.get_local_rank('dp')
world_size = self.ulysses_device_mesh.size(0)
if self.ulysses_device_mesh.get_rank() == 0:
print(f'Using SP rank {rank} and size {world_size} for data distribution')
print('Ranks within the same SP group receive the same data; different SP groups receive different batches')
else:
rank = self.device_mesh.get_rank()
world_size = self.device_mesh.size()
if self.device_mesh.get_rank() == 0:
print(f'Using FSDP rank {rank} and size {world_size} for data distribution')
self.train_sampler = DistributedSampler(self.train_dataset,
shuffle=True,
num_replicas=world_size,
rank=rank,
drop_last=True)
self.train_dataloader = DataLoader(dataset=self.train_dataset,
batch_size=config.data.train_batch_size,
sampler=self.train_sampler,
num_workers=8,
pin_memory=True,
drop_last=True)
self.val_sampler = DistributedSampler(self.val_dataset,
shuffle=False,
num_replicas=world_size,
rank=rank,
drop_last=True)
self.val_dataloader = DataLoader(dataset=self.val_dataset,
batch_size=config.data.micro_batch_size_per_gpu,
sampler=self.val_sampler,
num_workers=8,
pin_memory=True,
drop_last=True)
def _build_model_optimizer(self):
# TODO (zhangchi.usc1992):
# 1. support pretrain from random weights
# 2. support init directly from sharded weights
local_model_path = copy_to_local(src=self.config.model.partial_pretrain, verbose=True)
if self.config.model.get('external_lib', None) is not None:
# This is used to import external_lib into the huggingface systems
import importlib
importlib.import_module(self.config.model.external_lib)
log_gpu_memory_usage('Before model allocation', logger=logger)
trust_remote_code = self.config.model.trust_remote_code
# load config first
config = AutoConfig.from_pretrained(local_model_path, trust_remote_code=trust_remote_code)
if self.config.ulysses_sequence_parallel_size > 1:
assert self.use_remove_padding, "Sequence parallel is only supported when remove_padding is enabled"
from verl.models.registry import check_model_support_rmpad
check_model_support_rmpad(config.model_type)
if self.use_remove_padding and self.config.ulysses_sequence_parallel_size > 1:
from verl.models.transformers.monkey_patch import apply_monkey_patch
apply_monkey_patch(config, verbose=True)
# This may be very large
init_context = get_init_weight_context_manager(use_meta_tensor=not config.tie_word_embeddings)
with init_context():
self.model: PreTrainedModel = AutoModelForCausalLM.from_pretrained(local_model_path,
config=config,
torch_dtype=torch.float32,
attn_implementation='flash_attention_2',
trust_remote_code=trust_remote_code)
# Apply Liger kernel if use_liger is enabled
if self.config.model.get('use_liger', False):
from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance
_apply_liger_kernel_to_instance(model=self.model)
if self.config.model.get('lora_rank', 0) > 0:
self.model.enable_input_require_grads()
# Convert config to regular Python types before creating PEFT model
lora_config = {
'task_type': TaskType.CAUSAL_LM,
'r': self.config.model.lora_rank,
'lora_alpha': self.config.model.lora_alpha,
'target_modules': convert_to_regular_types(self.config.model.target_modules),
'bias': "none"
}
self.model = get_peft_model(self.model, LoraConfig(**lora_config))
if self.config.model.enable_gradient_checkpointing:
self.model.gradient_checkpointing_enable(gradient_checkpointing_kwargs={'use_reentrant': False})
log_gpu_memory_usage('After model allocation', logger=logger)
mixed_precision = MixedPrecision(param_dtype=torch.bfloat16,
reduce_dtype=torch.float32,
buffer_dtype=torch.float32)
auto_wrap_policy = get_fsdp_wrap_policy(self.model,
config=self.config.model.fsdp_config.wrap_policy,
is_lora=self.config.model.get('lora_rank', 0) > 0)
if self.device_mesh.get_rank() == 0:
print(auto_wrap_policy)
if not self.config.model.fsdp_config.cpu_offload:
cpu_offload = None
else:
cpu_offload = CPUOffload(offload_params=self.config.model.fsdp_config.offload_params)
self.fsdp_model = FSDP(module=self.model,
auto_wrap_policy=auto_wrap_policy,
param_init_fn=init_fn,
sharding_strategy=ShardingStrategy.FULL_SHARD,
mixed_precision=mixed_precision,
device_mesh=self.device_mesh,
sync_module_states=True,
device_id=torch.cuda.current_device(),
cpu_offload=cpu_offload,
use_orig_params=False)
log_gpu_memory_usage('After FSDP wrapping', logger=logger)
self.optimizer = optim.AdamW(self.fsdp_model.parameters(),
lr=self.config.optim.lr,
betas=self.config.optim.betas,
weight_decay=self.config.optim.weight_decay)
log_gpu_memory_usage('After initialize optimizer', logger=logger)
self.steps_per_epoch = len(self.train_dataloader)
self.total_steps = self.steps_per_epoch * self.config.trainer.total_epochs
if self.device_mesh.get_rank() == 0:
print(
f'Number of steps/epoch {self.steps_per_epoch}, number of epochs {self.config.trainer.total_epochs}, total number of steps {self.total_steps}'
)
num_warmup_steps = int(self.total_steps * self.config.optim.warmup_steps_ratio)
self.lr_scheduler = get_cosine_schedule_with_warmup(optimizer=self.optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=self.total_steps)
def _compute_loss_and_backward(self, batch, do_backward=True):
"""Compute loss with optional sequence parallelism and remove padding features"""
use_sp = self.use_remove_padding and self.config.ulysses_sequence_parallel_size > 1
# Move inputs to GPU and prepare loss mask
input_ids = batch['input_ids'].cuda()
attention_mask = batch['attention_mask'].cuda()
position_ids = batch['position_ids'].cuda()
loss_mask = batch.pop('loss_mask')[:, :-1].reshape(-1).cuda()
loss_fct = nn.CrossEntropyLoss(reduction='none')
# Context manager for sequence parallel if needed
context = self.sharding_manager if use_sp else nullcontext()
with context:
with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
if not use_sp:
# Standard forward pass without sequence parallel
labels = input_ids[:, 1:].contiguous()
output = self.fsdp_model(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
use_cache=False)
logits = output.logits
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels.contiguous()
# Flatten the tokens
shift_logits = shift_logits.view(-1, self.model.config.vocab_size)
shift_labels = shift_labels.view(-1)
# Enable model parallelism
shift_labels = shift_labels.to(shift_logits.device)
loss = loss_fct(shift_logits, shift_labels)
loss = loss * loss_mask.to(loss.device)
else:
# IMPORTANT: We make a big assumption here so that we can shard the SAME sequence across SP ranks:
# i.e., each GPU holds a fraction (<1) of a sequence, and each SP group holds exactly 1 sequence
# 1. All SP ranks will receive the *SAME* batch
# 2. Different SP groups will receive *DIFFERENT* batches
# This is implemented by the DistributedSampler
batch_size, seqlen = input_ids.shape
# Remove padding
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1),
attention_mask) # input_ids_rmpad (total_nnz, ...)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)
# Unpad position_ids to align rotary
position_ids_rmpad = index_first_axis(rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."),
indices).transpose(0, 1)
# Pad and slice inputs for sequence parallelism
input_ids_rmpad_sliced, position_ids_rmpad_padded, pad_size = ulysses_pad_and_slice_inputs(
input_ids_rmpad, position_ids_rmpad, sp_size=get_ulysses_sequence_parallel_world_size())
# For computing loss
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz)
input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(
input_ids_rmpad_rolled, None, get_ulysses_sequence_parallel_world_size())
input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad)
# Forward pass
output = self.fsdp_model(
input_ids=input_ids_rmpad_sliced,
attention_mask=None, # Not needed with flash attention varlen
position_ids=position_ids_rmpad_padded,
use_cache=False)
# Compute loss locally then aggregate
logits_rmpad = output.logits.squeeze(0)
input_ids_rmpad_rolled = input_ids_rmpad_rolled.to(logits_rmpad.device)
loss = loss_fct(logits_rmpad, input_ids_rmpad_rolled)
# Gather and unpad for sequence parallelism
loss = gather_outpus_and_unpad(loss, gather_dim=0, unpad_dim=0, padding_size=pad_size)
# This is the loss collected from all ulysses ranks
full_loss = pad_input(hidden_states=loss.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
full_loss = full_loss.squeeze(-1)[:, :-1] # Remove last token's loss
full_loss = full_loss.reshape(-1)
loss_mask = loss_mask.to(full_loss.device)
loss = full_loss * loss_mask
valid_token_this_rank = torch.sum(loss_mask)
if self.config.data.balance_dp_token:
torch.distributed.all_reduce(valid_token_this_rank)
dp_size = self.ulysses_device_mesh.size('dp') if use_sp else torch.distributed.get_world_size()
else:
dp_size = 1
loss = torch.sum(loss) / valid_token_this_rank * dp_size
if do_backward:
loss.backward()
return loss
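# [Editor's note] Why torch.roll(..., shifts=-1, dims=1) serves as next-token
# labels in the remove-padding branch above: rolling the packed token ids left by
# one aligns position i's logits with token i+1. Toy check:
#   ids = [[5, 6, 7, 8]]; torch.roll(ids, -1, dims=1) -> [[6, 7, 8, 5]]
#   and rolled[:, :-1] == ids[:, 1:].
# The wrapped-around final position is removed by the [:, :-1] loss masking.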
def training_step(self, batch: TensorDict):
self.fsdp_model.train()
log_gpu_memory_usage('Before optimizer zero_grad', logger=logger)
self.optimizer.zero_grad()
log_gpu_memory_usage('After optimizer zero_grad', logger=logger)
micro_batches = batch.split(self.config.data.micro_batch_size_per_gpu)
n_micro_batches = len(micro_batches)
step_loss = 0
for micro_batch in micro_batches:
loss = self._compute_loss_and_backward(batch=micro_batch) / n_micro_batches
step_loss += loss.item()
self.fsdp_model.clip_grad_norm_(max_norm=self.config.optim.clip_grad)
log_gpu_memory_usage('Before optimizer step', logger=logger)
self.optimizer.step()
log_gpu_memory_usage('After optimizer step', logger=logger)
self.lr_scheduler.step()
# reduce loss across dp ranks
lr = self.lr_scheduler.get_last_lr()[0]
log_gpu_memory_usage('After lr_scheduler step', logger=logger)
step_loss = torch.tensor(step_loss).cuda()
torch.distributed.all_reduce(step_loss, op=torch.distributed.ReduceOp.AVG)
return {'train/loss': step_loss.detach().item(), 'train/lr(1e-3)': lr * 1e3}
def validation_step(self, batch: TensorDict):
self.fsdp_model.eval()
with torch.no_grad():
loss = self._compute_loss_and_backward(batch, do_backward=False)
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.AVG)
return loss
def save_checkpoint(self, step):
# save checkpoint
from torch.distributed.fsdp import FullStateDictConfig, StateDictType
cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(self.fsdp_model, StateDictType.FULL_STATE_DICT, cfg):
state_dict = self.fsdp_model.state_dict()
path = os.path.join(self.config.trainer.default_local_dir, f'global_step_{step}')
# save huggingface model
if self.device_mesh.get_rank() == 0:
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(path, state_dict=state_dict)
self.tokenizer.save_pretrained(path)
if self.config.trainer.default_hdfs_dir:
hdfs_io.makedirs(self.config.trainer.default_hdfs_dir, exist_ok=True)
hdfs_io.copy(src=path, dst=self.config.trainer.default_hdfs_dir, dirs_exist_ok=True)
torch.distributed.barrier()
def fit(self):
rank = self.device_mesh.get_rank()
# TODO: add a unified tracking
if rank == 0:
tracking = Tracking(project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger)
global_step = 0
# compute the total training steps.
# the total training steps in SFT is mainly for early exit
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.trainer.total_training_steps is not None:
total_training_steps = self.config.trainer.total_training_steps
self.total_training_steps = total_training_steps
print(f'Total training steps: {self.total_training_steps}')
# TODO (zhangchi.usc1992) add back checkpoint manager. Currently, it blocks when uploading to hdfs. So very slow.
for epoch in range(self.config.trainer.total_epochs):
self.train_sampler.set_epoch(epoch=epoch)
for data in tqdm(self.train_dataloader,
total=self.steps_per_epoch,
desc=f"Epoch {epoch+1}/{self.config.trainer.total_epochs}"):
data = TensorDict(data, batch_size=self.config.data.train_batch_size).cuda()
metric = self.training_step(data)
if rank == 0:
tracking.log(data=metric, step=global_step)
global_step += 1
# for early exit validation
if global_step >= self.total_training_steps:
# Perform final validation
val_losses = []
for val_data in self.val_dataloader:
val_data = TensorDict(val_data, batch_size=self.config.data.micro_batch_size_per_gpu).cuda()
val_loss = self.validation_step(val_data)
val_losses.append(val_loss)
if rank == 0:
avg_val_loss = torch.mean(torch.stack(val_losses))
metric = {'val/loss': avg_val_loss.detach().item()}
tracking.log(data=metric, step=global_step)
torch.distributed.barrier()
# Save final checkpoint
self.save_checkpoint(step=global_step)
return
# validation
val_losses = []
for data in self.val_dataloader:
data = TensorDict(data, batch_size=self.config.data.micro_batch_size_per_gpu).cuda()
val_loss = self.validation_step(data)
val_losses.append(val_loss)
if rank == 0:
val_loss = torch.mean(torch.stack(val_losses))
metric = {'val/loss': val_loss.detach().item()}
tracking.log(data=metric, step=global_step)
torch.distributed.barrier()
# save checkpoint
self.save_checkpoint(step=global_step)
from verl.trainer.fsdp_sft_trainer import FSDPSFTTrainer
import hydra
from torch.distributed.device_mesh import init_device_mesh
from verl.utils.distributed import initialize_global_process_group
@hydra.main(config_path='config', config_name='sft_trainer', version_base=None)
def main(config):
local_rank, rank, world_size = initialize_global_process_group()
device_mesh = init_device_mesh(device_type='cuda', mesh_shape=(world_size,), mesh_dim_names=('fsdp',))
dp_size = world_size // config.ulysses_sequence_parallel_size
ulysses_device_mesh = init_device_mesh(device_type='cuda',
mesh_shape=(dp_size, config.ulysses_sequence_parallel_size),
mesh_dim_names=('dp', 'sp'))
trainer = FSDPSFTTrainer(config=config, device_mesh=device_mesh, ulysses_device_mesh=ulysses_device_mesh)
trainer.fit()
if __name__ == '__main__':
main()
================================================
FILE: verl/trainer/main_eval.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Offline evaluate the performance of a generated file using reward model and ground truth verifier.
The input is a parquet file that contains N generated sequences and (optional) the ground truth.
"""
import hydra
from verl.utils.fs import copy_to_local
from verl.utils.reward_score import math, gsm8k
import pandas as pd
import numpy as np
def select_reward_fn(data_source):
if data_source == 'lighteval/MATH':
return math.compute_score
else:
raise NotImplementedError
@hydra.main(config_path='config', config_name='evaluation', version_base=None)
def main(config):
local_path = copy_to_local(config.data.path)
dataset = pd.read_parquet(local_path)
prompts = dataset[config.data.prompt_key]
responses = dataset[config.data.response_key]
data_sources = dataset[config.data.data_source_key]
reward_model_data = dataset[config.data.reward_model_key]
passes = 0
total = len(dataset)
for i in range(total):
response_lst = responses[i]
data_source = data_sources[i]
# select reward score based on data_source
prompt = prompts[i]
reward_data = reward_model_data[i]
reward_fn = select_reward_fn(data_source)
ground_truth = reward_data['ground_truth']
score_lst = []
for r in response_lst:
score = reward_fn(r, ground_truth)
score_lst.append(score)
max_score = np.max(score_lst)
if max_score == 1:
passes += 1
print(f'pass@{len(response_lst)}: {passes / total}')  # n = number of responses per prompt
if __name__ == '__main__':
main()
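# [Editor's note] The metric printed above is pass@n over the n generated
# responses stored per prompt: a prompt passes if any of its responses scores 1.
# Toy illustration with made-up scores for two prompts (n = 5):
#   scores = [[0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]
#   passes = sum(max(s) == 1 for s in scores)  # 1
#   passes / len(scores)                       # 0.5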
================================================
FILE: verl/trainer/main_generation.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate responses given a dataset of prompts
"""
import ray
import numpy as np
import hydra
import os
os.environ['NCCL_DEBUG'] = 'WARN'
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
# os.environ['TORCH_COMPILE_DISABLE'] = '1'
from verl.utils.model import compute_position_id_with_mask
import pandas as pd
from transformers import AutoTokenizer
from verl import DataProto
from verl.utils.fs import copy_to_local
from verl.workers.fsdp_workers import ActorRolloutRefWorker
from verl.utils.hdfs_io import makedirs
from verl.single_controller.ray import RayClassWithInitArgs, RayResourcePool, RayWorkerGroup
@hydra.main(config_path='config', config_name='generation', version_base=None)
def main(config):
from pprint import pprint
from omegaconf import OmegaConf
pprint(OmegaConf.to_container(config, resolve=True)) # resolve=True will eval symbol values
OmegaConf.resolve(config)
local_path = copy_to_local(config.model.path)
from verl.utils import hf_tokenizer
tokenizer = hf_tokenizer(local_path)
if config.rollout.temperature == 0.:
assert config.data.n_samples == 1, 'When temperature=0, n_samples must be 1.'
# read dataset. Note that the dataset should directly contain chat template format (e.g., a list of dictionary)
dataset = pd.read_parquet(config.data.path)
chat_lst = dataset[config.data.prompt_key].tolist()
chat_lst = [chat.tolist() for chat in chat_lst]
tokenizer.padding_side = 'left'
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
ray_cls_with_init = RayClassWithInitArgs(cls=ray.remote(ActorRolloutRefWorker), config=config, role='actor_rollout')
resource_pool = RayResourcePool(process_on_nodes=[config.trainer.n_gpus_per_node] * config.trainer.nnodes)
wg = RayWorkerGroup(resource_pool=resource_pool, ray_cls_with_init=ray_cls_with_init)
wg.init_model()
total_samples = len(dataset)
# real_batch_size = data.batch['input_ids'].shape[0]
config_batch_size = config.data.batch_size
dp_size = wg.world_size // config.rollout.tensor_model_parallel_size
num_batch = -(-total_samples // config_batch_size)  # ceiling division avoids a trailing empty batch
output_lst = [[] for _ in range(config.data.n_samples)]
for batch_idx in range(num_batch):
print(f'[{batch_idx+1}/{num_batch}] Start to process.')
batch_chat_lst = chat_lst[batch_idx * config_batch_size:(batch_idx + 1) * config_batch_size]
inputs = tokenizer.apply_chat_template(batch_chat_lst,
add_generation_prompt=True,
padding=True,
truncation=True,
max_length=config.rollout.prompt_length,
return_tensors='pt',
return_dict=True,
tokenize=True)
input_ids = inputs['input_ids']
attention_mask = inputs['attention_mask']
position_ids = compute_position_id_with_mask(attention_mask)
batch_dict = {'input_ids': input_ids, 'attention_mask': attention_mask, 'position_ids': position_ids}
data = DataProto.from_dict(batch_dict)
real_batch_size = data.batch['input_ids'].shape[0]
if real_batch_size % dp_size != 0:
dummy_data_size = dp_size - real_batch_size % dp_size
dummy_data = data[:dummy_data_size]
data = DataProto.concat([data, dummy_data])
print(
f'real_batch_size {real_batch_size} is not divisible by dp_size {dp_size}, add {dummy_data_size} dummy data'
)
batch_size = data.batch['input_ids'].shape[0]
assert batch_size % dp_size == 0, f'batch_size {batch_size} is not divisible by dp_size {dp_size}'
print(f'[{batch_idx+1}/{num_batch}] Start to generate.')
# START TO GENERATE FOR n_samples TIMES
for i in range(config.data.n_samples):
output = wg.generate_sequences(data)
# remove dummy data
output = output[:real_batch_size]
output_text = tokenizer.batch_decode(output.batch['input_ids'][:, -config.rollout.response_length:],
skip_special_tokens=False)
# remove the padding
pad_token = tokenizer.pad_token
output_text_unpad = []
for text in output_text:
output_text_unpad.append(text.replace(pad_token, ''))
output_lst[i].extend(output_text_unpad)
# convert output_lst from (n_samples, n_data) to (n_data, n_samples)
output_lst = np.array(output_lst, dtype=object)
output_lst = np.transpose(output_lst, axes=(1, 0)).tolist()
# add to the data frame
dataset['responses'] = output_lst
# write to a new parquet
output_dir = os.path.dirname(config.data.output_path)
makedirs(output_dir, exist_ok=True)
dataset.to_parquet(config.data.output_path)
return output_text
if __name__ == '__main__':
main()
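# A minimal launch sketch (not from the repo; paths and values are hypothetical).
# The Hydra override keys mirror the config reads above (model.path, data.path,
# data.output_path, data.n_samples, rollout.temperature):
#
#   python -m verl.trainer.main_generation \
#       model.path=/path/to/checkpoint \
#       data.path=/path/to/prompts.parquet \
#       data.output_path=/path/to/output.parquet \
#       data.n_samples=4 rollout.temperature=1.0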
================================================
FILE: verl/trainer/main_ppo.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Note that we don't combine this main with ray_trainer, since ray_trainer is also used by other entry points.
"""
from verl.trainer.ppo.ray_trainer import RayPPOTrainer
import ray
import hydra
@hydra.main(config_path='config', config_name='ppo_trainer', version_base=None)
def main(config):
run_ppo(config)
def run_ppo(config, compute_score=None):
if not ray.is_initialized():
# this is for local ray cluster
ray.init(runtime_env={'env_vars': {'TOKENIZERS_PARALLELISM': 'true', 'NCCL_DEBUG': 'WARN'}})
ray.get(main_task.remote(config, compute_score))
@ray.remote(num_cpus=1) # please make sure main_task is not scheduled on head
def main_task(config, compute_score=None):
from verl.utils.fs import copy_to_local
# print initial config
from pprint import pprint
from omegaconf import OmegaConf
pprint(OmegaConf.to_container(config, resolve=True))  # resolve=True evaluates interpolated values
OmegaConf.resolve(config)
# download the checkpoint from hdfs
local_path = copy_to_local(config.actor_rollout_ref.model.path)
# instantiate tokenizer
from verl.utils import hf_tokenizer, hf_processor
tokenizer = hf_tokenizer(local_path)
processor = hf_processor(local_path, use_fast=True)  # used for multimodal LLMs; may be None
# define worker classes
if config.actor_rollout_ref.actor.strategy == 'fsdp':
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.workers.fsdp_workers import ActorRolloutRefWorker, CriticWorker
from verl.single_controller.ray import RayWorkerGroup
ray_worker_group_cls = RayWorkerGroup
elif config.actor_rollout_ref.actor.strategy == 'megatron':
assert config.actor_rollout_ref.actor.strategy == config.critic.strategy
from verl.workers.megatron_workers import ActorRolloutRefWorker, CriticWorker
from verl.single_controller.ray.megatron import NVMegatronRayWorkerGroup
ray_worker_group_cls = NVMegatronRayWorkerGroup
else:
raise NotImplementedError
from verl.trainer.ppo.ray_trainer import ResourcePoolManager, Role
role_worker_mapping = {
Role.ActorRollout: ray.remote(ActorRolloutRefWorker),
Role.Critic: ray.remote(CriticWorker),
}
if not (config.actor_rollout_ref.actor.kl_loss_coef==0 and config.algorithm.kl_ctrl.kl_coef==0):
role_worker_mapping[Role.RefPolicy] = ray.remote(ActorRolloutRefWorker)
else:
config.actor_rollout_ref.actor.use_kl_loss=False
global_pool_id = 'global_pool'
resource_pool_spec = {
global_pool_id: [config.trainer.n_gpus_per_node] * config.trainer.nnodes,
}
mapping = {
Role.ActorRollout: global_pool_id,
Role.Critic: global_pool_id,
Role.RefPolicy: global_pool_id,
}
# we should adopt a multi-source reward function here
# - for rule-based rm, we directly call a reward score
# - for model-based rm, we call a model
# - for code related prompt, we send to a sandbox if there are test cases
# - finally, we combine all the rewards together
# - The reward type depends on the tag of the data
if config.reward_model.enable:
if config.reward_model.strategy == 'fsdp':
from verl.workers.fsdp_workers import RewardModelWorker
elif config.reward_model.strategy == 'megatron':
from verl.workers.megatron_workers import RewardModelWorker
else:
raise NotImplementedError
role_worker_mapping[Role.RewardModel] = ray.remote(RewardModelWorker)
mapping[Role.RewardModel] = global_pool_id
reward_manager_name = config.reward_model.get("reward_manager", "naive")
if reward_manager_name == 'naive':
from verl.workers.reward_manager import NaiveRewardManager
reward_manager_cls = NaiveRewardManager
elif reward_manager_name == 'prime':
from verl.workers.reward_manager import PrimeRewardManager
reward_manager_cls = PrimeRewardManager
else:
raise NotImplementedError
reward_fn = reward_manager_cls(config=config, tokenizer=tokenizer, num_examine=0, compute_score=compute_score)
# Note that we always use function-based RM for validation
val_reward_fn = reward_manager_cls(config=config, tokenizer=tokenizer, num_examine=1, compute_score=compute_score)
resource_pool_manager = ResourcePoolManager(resource_pool_spec=resource_pool_spec, mapping=mapping)
trainer = RayPPOTrainer(config=config,
tokenizer=tokenizer,
processor=processor,
role_worker_mapping=role_worker_mapping,
resource_pool_manager=resource_pool_manager,
ray_worker_group_cls=ray_worker_group_cls,
reward_fn=reward_fn,
val_reward_fn=val_reward_fn)
trainer.init_workers()
trainer.fit()
if __name__ == '__main__':
main()
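# A minimal launch sketch (hypothetical values; see scripts/torl_1.5b.sh for the repo's
# actual recipe). Only override keys that are read in this entry point are shown:
#
#   python -m verl.trainer.main_ppo \
#       actor_rollout_ref.model.path=/path/to/base_model \
#       actor_rollout_ref.actor.strategy=fsdp critic.strategy=fsdp \
#       trainer.n_gpus_per_node=8 trainer.nnodes=1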
================================================
FILE: verl/trainer/ppo/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/trainer/ppo/core_algos.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Core functions to implement PPO algorithms.
The functions implemented in this file should be used by trainers with different distributed strategies to
implement PPO.
"""
import numpy as np
import torch
from collections import defaultdict
import verl.utils.torch_functional as verl_F
class AdaptiveKLController:
"""
Adaptive KL controller described in the paper:
https://arxiv.org/pdf/1909.08593.pdf
"""
def __init__(self, init_kl_coef, target_kl, horizon):
self.value = init_kl_coef
self.target = target_kl
self.horizon = horizon
def update(self, current_kl, n_steps):
target = self.target
proportional_error = np.clip(current_kl / target - 1, -0.2, 0.2)
mult = 1 + proportional_error * n_steps / self.horizon
self.value *= mult
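# Worked example of the update above (illustrative numbers, not from the repo): with
# value=0.2, target=6.0, horizon=10000, an observed current_kl=9.0 over n_steps=256 gives
# proportional_error = clip(9/6 - 1, -0.2, 0.2) = 0.2, so mult = 1 + 0.2 * 256 / 10000
# = 1.00512 and the coefficient grows from 0.2 to ~0.201.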
class FixedKLController:
"""Fixed KL controller."""
def __init__(self, kl_coef):
self.value = kl_coef
def update(self, current_kl, n_steps):
pass
def get_kl_controller(config):
if config.critic.kl_ctrl.type == 'fixed':
kl_ctrl = FixedKLController(kl_coef=config.critic.kl_ctrl.kl_coef)
elif config.critic.kl_ctrl.type == 'adaptive':
assert config.critic.kl_ctrl.horizon > 0, f'horizon must be larger than 0. Got {config.critic.kl_ctrl.horizon}'
kl_ctrl = AdaptiveKLController(init_kl_coef=config.critic.kl_ctrl.kl_coef,
target_kl=config.critic.kl_ctrl.target_kl,
horizon=config.critic.kl_ctrl.horizon)
else:
raise ValueError('Unknown kl_ctrl type')
return kl_ctrl
def compute_gae_advantage_return(token_level_rewards: torch.Tensor, values: torch.Tensor, eos_mask: torch.Tensor,
gamma: torch.Tensor, lam: torch.Tensor):
"""Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py
Args:
token_level_rewards: `(torch.Tensor)`
shape: (bs, response_length)
values: `(torch.Tensor)`
shape: (bs, response_length)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length). [EOS] mask. Tokens after [EOS] have mask zero.
gamma: `(float)`
discount factor used in RL
lam: `(float)`
lambda value when computing Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)
Returns:
advantages: `(torch.Tensor)`
shape: (bs, response_length)
returns: `(torch.Tensor)`
shape: (bs, response_length)
"""
with torch.no_grad():
lastgaelam = 0
advantages_reversed = []
gen_len = token_level_rewards.shape[-1]
for t in reversed(range(gen_len)):
nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0
delta = token_level_rewards[:, t] + gamma * nextvalues - values[:, t]
lastgaelam = delta + gamma * lam * lastgaelam
advantages_reversed.append(lastgaelam)
advantages = torch.stack(advantages_reversed[::-1], dim=1)
returns = advantages + values
advantages = verl_F.masked_whiten(advantages, eos_mask)
return advantages, returns
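# The loop above is the standard GAE recursion (https://arxiv.org/abs/1506.02438):
#   delta_t = r_t + gamma * V_{t+1} - V_t
#   A_t     = delta_t + gamma * lam * A_{t+1},   R_t = A_t + V_t
# followed by masked whitening of the advantages. Special case: gamma = lam = 1 reduces
# A_t to the Monte-Carlo return minus the value baseline, sum_{s>=t} r_s - V_t.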
# NOTE(sgm): this implementation only considers outcome supervision, where the reward is a scalar.
def compute_grpo_outcome_advantage(token_level_rewards: torch.Tensor,
eos_mask: torch.Tensor,
index: torch.Tensor,
epsilon: float = 1e-6):
"""
Compute advantage for GRPO, operating only on Outcome reward
(with only one scalar reward for each response).
Args:
token_level_rewards: `(torch.Tensor)`
shape: (bs, response_length)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
advantages: `(torch.Tensor)`
shape: (bs, response_length)
returns: `(torch.Tensor)`
shape: (bs, response_length)
"""
response_length = token_level_rewards.shape[-1]
scores = token_level_rewards.sum(dim=-1)
id2score = defaultdict(list)
id2mean = {}
id2std = {}
with torch.no_grad():
bsz = scores.shape[0]
for i in range(bsz):
id2score[index[i]].append(scores[i])
for idx in id2score:
if len(id2score[idx]) == 1:
id2mean[idx] = torch.tensor(0.0)
id2std[idx] = torch.tensor(1.0)
elif len(id2score[idx]) > 1:
id2mean[idx] = torch.mean(torch.tensor(id2score[idx]))
id2std[idx] = torch.std(torch.tensor(id2score[idx]))
else:
raise ValueError(f"no score in prompt index: {idx}")
for i in range(bsz):
scores[i] = (scores[i] - id2mean[index[i]]) / (id2std[index[i]] + epsilon)
scores = scores.unsqueeze(-1).tile([1, response_length]) * eos_mask
return scores, scores
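# Worked example of the group normalization above (illustrative numbers): if one prompt has
# three sampled responses with outcome scores [1, 0, 1], then mean = 2/3, std ~= 0.577, and
# the per-response advantages are roughly [0.577, -1.155, 0.577] (up to epsilon), broadcast
# over all response tokens via eos_mask.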
def compute_rloo_outcome_advantage(token_level_rewards: torch.Tensor,
eos_mask: torch.Tensor,
index: torch.Tensor,
epsilon: float = 1e-6):
"""
Compute advantage for RLOO based on https://arxiv.org/abs/2402.14740
Args:
token_level_rewards: `(torch.Tensor)`
shape: (bs, response_length)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
advantages: `(torch.Tensor)`
shape: (bs, response_length)
returns: `(torch.Tensor)`
shape: (bs, response_length)
"""
response_length = token_level_rewards.shape[-1]
scores = token_level_rewards.sum(dim=-1)
id2score = defaultdict(list)
id2mean = {}
with torch.no_grad():
bsz = scores.shape[0]
for i in range(bsz):
id2score[index[i]].append(scores[i])
for idx in id2score:
if len(id2score[idx]) == 1:
id2mean[idx] = torch.tensor(0.0)
elif len(id2score[idx]) > 1:
id2mean[idx] = torch.mean(torch.tensor(id2score[idx]))
else:
raise ValueError(f"no score in prompt index: {idx}")
for i in range(bsz):
response_num = len(id2score[index[i]])
if response_num > 1:
scores[i] = scores[i] * response_num / (response_num -
1) - id2mean[index[i]] * response_num / (response_num - 1)
scores = scores.unsqueeze(-1).tile([1, response_length]) * eos_mask
return scores, scores
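# The scaling above is algebraically the leave-one-out baseline of the RLOO paper:
#   s_i * n/(n-1) - mean * n/(n-1) = s_i - (1/(n-1)) * sum_{j != i} s_j,
# i.e. each response's score minus the average score of the other n-1 responses
# sampled for the same prompt.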
def compute_reinforce_plus_plus_outcome_advantage(token_level_rewards: torch.Tensor, eos_mask: torch.Tensor,
gamma: torch.Tensor):
"""
Compute advantage for REINFORCE++.
This implementation is based on the paper: https://arxiv.org/abs/2501.03262
Args:
token_level_rewards: `(torch.Tensor)`
shape: (bs, response_length)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
advantages: `(torch.Tensor)`
shape: (bs, response_length)
returns: `(torch.Tensor)`
shape: (bs, response_length)
"""
with torch.no_grad():
returns = torch.zeros_like(token_level_rewards)
running_return = 0
for t in reversed(range(token_level_rewards.shape[1])):
running_return = token_level_rewards[:, t] + gamma * running_return
returns[:, t] = running_return
# Reset after EOS
running_return = running_return * eos_mask[:, t]
advantages = verl_F.masked_whiten(returns, eos_mask)
advantages = advantages * eos_mask
return advantages, returns
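# The recursion above accumulates discounted returns right-to-left, G_t = r_t + gamma * G_{t+1},
# zeroing the running return past the end of the response (eos_mask), then whitens the returns
# into advantages. With gamma=1 and a single terminal reward R, every valid token receives
# G_t = R before whitening.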
def compute_remax_outcome_advantage(token_level_rewards: torch.Tensor, reward_baselines: torch.Tensor,
eos_mask: torch.Tensor):
"""
Compute advantage for ReMax, operating only on Outcome reward
This implementation is based on the paper: https://arxiv.org/abs/2310.10505
(with only one scalar reward for each response).
Args:
token_level_rewards: `(torch.Tensor)`
shape: (bs, response_length)
reward_baselines: `(torch.Tensor)`
shape: (bs,)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
advantages: `(torch.Tensor)`
shape: (bs, response_length)
returns: `(torch.Tensor)`
shape: (bs, response_length)
"""
response_length = token_level_rewards.shape[-1]
scores = token_level_rewards.sum(dim=-1)
with torch.no_grad():
returns = (token_level_rewards * eos_mask).flip(dims=[-1]).cumsum(dim=-1).flip(dims=[-1])
advantages = returns - reward_baselines.unsqueeze(-1).tile([1, response_length]) * eos_mask
return advantages, returns
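# The flip/cumsum/flip above is a vectorized reverse cumulative sum: returns[:, t] is the
# total future reward sum_{s>=t} r_s. ReMax then subtracts a per-sequence baseline (here the
# reward of a greedy rollout, see the REMAX branch in ray_trainer.py) from every token,
# instead of learning a value function.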
def compute_rewards(token_level_scores, old_log_prob, ref_log_prob, kl_ratio):
kl = old_log_prob - ref_log_prob
return token_level_scores - kl * kl_ratio
def compute_policy_loss(old_log_prob, log_prob, advantages, eos_mask, cliprange):
"""Adapted from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1122
Args:
old_log_prob: `(torch.Tensor)`
shape: (bs, response_length)
log_prob: `(torch.Tensor)`
shape: (bs, response_length)
advantages: `(torch.Tensor)`
shape: (bs, response_length)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
cliprange: (float)
The clip range used in PPO. See https://arxiv.org/abs/1707.06347
Returns:
pg_loss: `a scalar torch.Tensor`
policy gradient loss computed via PPO
pg_clipfrac: (float)
a float number indicating the fraction of policy gradient loss being clipped
"""
negative_approx_kl = log_prob - old_log_prob
ratio = torch.exp(negative_approx_kl)
ppo_kl = verl_F.masked_mean(-negative_approx_kl, eos_mask)
pg_losses = -advantages * ratio
pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - cliprange, 1.0 + cliprange)
pg_loss = verl_F.masked_mean(torch.max(pg_losses, pg_losses2), eos_mask)
pg_clipfrac = verl_F.masked_mean(torch.gt(pg_losses2, pg_losses).float(), eos_mask)
return pg_loss, pg_clipfrac, ppo_kl
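# This is the PPO clipped surrogate written as a loss: L = E[max(-A * rho, -A * clip(rho, 1-eps, 1+eps))]
# with rho = exp(log_prob - old_log_prob). Illustrative case: advantage A=1, ratio rho=1.5,
# cliprange=0.2 gives max(-1.5, -1.2) = -1.2, so the update is capped at the clipped ratio
# and that token counts toward pg_clipfrac.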
def compute_entropy_loss(logits, eos_mask):
"""Compute Categorical entropy loss
Args:
logits: `(torch.Tensor)`
shape: (bs, response_length, vocab_size)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
entropy: a scalar torch.Tensor
"""
# compute entropy
entropy = verl_F.entropy_from_logits(logits) # (bs, response_len)
entropy_loss = verl_F.masked_mean(entropy, mask=eos_mask)
return entropy_loss
def compute_value_loss(vpreds, returns, values, eos_mask, cliprange_value):
"""Compute the value loss. Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1151
Args:
vpreds (`torch.FloatTensor`):
Predicted values of the value head, shape (`batch_size`, `response_length`)
values (`torch.FloatTensor`):
Old values of value head, shape (`batch_size`, `response_length`)
returns (`torch.FloatTensor`):
Ground truth returns, shape (`batch_size`, `response_length`)
Returns:
vf_loss: a scalar (`torch.FloatTensor`):
value function loss
vf_clipfrac: a float
The ratio of vf being clipped
"""
vpredclipped = verl_F.clip_by_value(vpreds, values - cliprange_value, values + cliprange_value)
vf_losses1 = (vpreds - returns)**2
vf_losses2 = (vpredclipped - returns)**2
vf_loss = 0.5 * verl_F.masked_mean(torch.max(vf_losses1, vf_losses2), eos_mask)
vf_clipfrac = verl_F.masked_mean(torch.gt(vf_losses2, vf_losses1).float(), eos_mask)
return vf_loss, vf_clipfrac
def kl_penalty(logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor, kl_penalty) -> torch.FloatTensor:
"""Compute KL divergence given logprob and ref_logprob.
Copied from https://github.com/huggingface/trl/blob/main/trl/trainer/ppo_trainer.py#L1104
Args:
logprob: log-probabilities of the sampled tokens under the current policy, shape (bs, response_length)
ref_logprob: log-probabilities of the same tokens under the reference policy, shape (bs, response_length)
Returns:
the token-level KL penalty estimate, shape (bs, response_length)
"""
if kl_penalty == "kl":
return logprob - ref_logprob
if kl_penalty == "abs":
return (logprob - ref_logprob).abs()
if kl_penalty == "mse":
return 0.5 * (logprob - ref_logprob).square()
# J. Schulman. Approximating kl divergence, 2020.
# URL http://joschu.net/blog/kl-approx.html.
if kl_penalty == 'low_var_kl':
kl = ref_logprob - logprob
ratio = torch.exp(kl)
kld = (ratio - kl - 1).contiguous()
return torch.clamp(kld, min=-10, max=10)
if kl_penalty == "full":
# so, here logprob and ref_logprob should contain the logits for every token in vocabulary
raise NotImplementedError
raise NotImplementedError
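# 'low_var_kl' above is Schulman's k3 estimator from the blog linked in the comments:
# with r = exp(ref_logprob - logprob), k3 = (r - 1) - log(r). It is non-negative, unbiased
# for KL(pi || pi_ref) under samples from pi, and lower-variance than the plain logprob
# difference; the clamp to [-10, 10] is a numerical guard.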
================================================
FILE: verl/trainer/ppo/ray_trainer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
FSDP PPO Trainer with Ray-based single controller.
This trainer supports model-agnostic model initialization with huggingface.
"""
import os
import uuid
from contextlib import contextmanager
from dataclasses import dataclass, field
from enum import Enum
from pprint import pprint
from typing import Type, Dict
from copy import deepcopy
import json
import numpy as np
from codetiming import Timer
from omegaconf import OmegaConf, open_dict
from verl import DataProto
from verl.protocol import pad_dataproto_to_divisor, unpad_dataproto
from verl.single_controller.base import Worker
from verl.single_controller.ray import RayResourcePool, RayWorkerGroup, RayClassWithInitArgs
from verl.single_controller.ray.base import create_colocated_worker_cls
from verl.trainer.ppo import core_algos
from verl.utils.seqlen_balancing import get_seqlen_balanced_partitions, log_seqlen_unbalance
from verl.utils.checkpoint.checkpoint_manager import find_latest_ckpt_path
from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn
from torch.utils.data import RandomSampler, SequentialSampler
from torchdata.stateful_dataloader import StatefulDataLoader
WorkerType = Type[Worker]
class Role(Enum):
"""
To create more roles dynamically, you can subclass Role and add new members
"""
Actor = 0
Rollout = 1
ActorRollout = 2
Critic = 3
RefPolicy = 4
RewardModel = 5
ActorRolloutRef = 6
class AdvantageEstimator(str, Enum):
"""
Using an enumeration class to avoid spelling errors in adv_estimator
"""
GAE = 'gae'
GRPO = 'grpo'
REINFORCE_PLUS_PLUS = 'reinforce_plus_plus'
REMAX = 'remax'
RLOO = 'rloo'
@dataclass
class ResourcePoolManager:
"""
Defines the resource pool specification and the mapping from each role to a resource pool.
Resource pools will be initialized first.
"""
resource_pool_spec: dict[str, list[int]]
mapping: dict[Role, str]
resource_pool_dict: dict[str, RayResourcePool] = field(default_factory=dict)
def create_resource_pool(self):
for resource_pool_name, process_on_nodes in self.resource_pool_spec.items():
# max_colocate_count means the number of WorkerGroups (i.e. processes) in each RayResourcePool
# For FSDP backend, we recommend using max_colocate_count=1, which merges all WorkerGroups into one.
# For Megatron backend, we recommend using max_colocate_count>1, which can utilize a different WorkerGroup for each model
resource_pool = RayResourcePool(process_on_nodes=process_on_nodes,
use_gpu=True,
max_colocate_count=1,
name_prefix=resource_pool_name)
self.resource_pool_dict[resource_pool_name] = resource_pool
def get_resource_pool(self, role: Role) -> RayResourcePool:
"""Get the resource pool of the worker_cls"""
return self.resource_pool_dict[self.mapping[role]]
import torch
from verl.utils.torch_functional import masked_mean
def apply_kl_penalty(data: DataProto, kl_ctrl: core_algos.AdaptiveKLController, kl_penalty='kl'):
responses = data.batch['responses']
response_length = responses.size(1)
token_level_scores = data.batch['token_level_scores']
batch_size = data.batch.batch_size[0]
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
# compute kl between ref_policy and current policy
if 'ref_log_prob' in data.batch.keys():
kld = core_algos.kl_penalty(data.batch['old_log_probs'], data.batch['ref_log_prob'],
kl_penalty=kl_penalty) # (batch_size, response_length)
kld = kld * response_mask
beta = kl_ctrl.value
else:
beta = 0
kld = torch.zeros_like(response_mask, dtype=torch.float32)
token_level_rewards = token_level_scores - beta * kld
current_kl = masked_mean(kld, mask=response_mask, axis=-1) # average over sequence
current_kl = torch.mean(current_kl, dim=0).item()
# according to https://github.com/huggingface/trl/blob/951ca1841f29114b969b57b26c7d3e80a39f75a0/trl/trainer/ppo_trainer.py#L837
kl_ctrl.update(current_kl=current_kl, n_steps=batch_size)
data.batch['token_level_rewards'] = token_level_rewards
metrics = {'critic/kl': current_kl, 'critic/kl_coeff': beta}
return data, metrics
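# In short: token_level_rewards = token_level_scores - beta * KL(pi || pi_ref) per token,
# where beta comes from the fixed or adaptive KL controller and is updated with the
# batch-averaged KL. Without reference log-probs, beta is 0 and scores pass through unchanged.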
def compute_advantage(data: DataProto, adv_estimator, gamma=1.0, lam=1.0, num_repeat=1):
# prepare response group
# TODO: add other ways to estimate advantages
if adv_estimator == AdvantageEstimator.GAE:
values = data.batch['values']
responses = data.batch['responses']
response_length = responses.size(-1)
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
token_level_rewards = data.batch['token_level_rewards']
advantages, returns = core_algos.compute_gae_advantage_return(token_level_rewards=token_level_rewards,
values=values,
eos_mask=response_mask,
gamma=gamma,
lam=lam)
data.batch['advantages'] = advantages
data.batch['returns'] = returns
elif adv_estimator == AdvantageEstimator.GRPO:
token_level_rewards = data.batch['token_level_rewards']
index = data.non_tensor_batch['uid']
responses = data.batch['responses']
response_length = responses.size(-1)
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
advantages, returns = core_algos.compute_grpo_outcome_advantage(token_level_rewards=token_level_rewards,
eos_mask=response_mask,
index=index)
data.batch['advantages'] = advantages
data.batch['returns'] = returns
elif adv_estimator == AdvantageEstimator.REINFORCE_PLUS_PLUS:
token_level_rewards = data.batch['token_level_rewards']
responses = data.batch['responses']
response_length = responses.size(-1)
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
advantages, returns = core_algos.compute_reinforce_plus_plus_outcome_advantage(
token_level_rewards=token_level_rewards, eos_mask=response_mask, gamma=gamma)
data.batch['advantages'] = advantages
data.batch['returns'] = returns
elif adv_estimator == AdvantageEstimator.REMAX:
token_level_rewards = data.batch['token_level_rewards']
index = data.non_tensor_batch['uid']
responses = data.batch['responses']
response_length = responses.size(-1)
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
reward_baselines = data.batch['reward_baselines']
advantages, returns = core_algos.compute_remax_outcome_advantage(token_level_rewards=token_level_rewards,
reward_baselines=reward_baselines,
eos_mask=response_mask)
data.batch['advantages'] = advantages
data.batch['returns'] = returns
elif adv_estimator == AdvantageEstimator.RLOO:
token_level_rewards = data.batch['token_level_rewards']
index = data.non_tensor_batch['uid']
responses = data.batch['responses']
response_length = responses.size(-1)
attention_mask = data.batch['attention_mask']
response_mask = attention_mask[:, -response_length:]
advantages, returns = core_algos.compute_rloo_outcome_advantage(token_level_rewards=token_level_rewards,
eos_mask=response_mask,
index=index)
data.batch['advantages'] = advantages
data.batch['returns'] = returns
else:
raise NotImplementedError
return data
def reduce_metrics(metrics: dict):
for key, val in metrics.items():
metrics[key] = np.mean(val)
return metrics
def _compute_response_info(batch):
response_length = batch.batch['responses'].shape[-1]
prompt_mask = batch.batch['attention_mask'][:, :-response_length]
response_mask = batch.batch['attention_mask'][:, -response_length:]
prompt_length = prompt_mask.sum(-1).float()
response_length = response_mask.sum(-1).float() # (batch_size,)
return dict(
response_mask=response_mask,
prompt_length=prompt_length,
response_length=response_length,
)
def compute_data_metrics(batch, use_critic=True):
# TODO: add response length
sequence_score = batch.batch['token_level_scores'].sum(-1)
sequence_reward = batch.batch['token_level_rewards'].sum(-1)
advantages = batch.batch['advantages']
returns = batch.batch['returns']
max_response_length = batch.batch['responses'].shape[-1]
prompt_mask = batch.batch['attention_mask'][:, :-max_response_length].bool()
response_mask = batch.batch['attention_mask'][:, -max_response_length:].bool()
max_prompt_length = prompt_mask.size(-1)
response_info = _compute_response_info(batch)
prompt_length = response_info['prompt_length']
response_length = response_info['response_length']
valid_adv = torch.masked_select(advantages, response_mask)
valid_returns = torch.masked_select(returns, response_mask)
if use_critic:
values = batch.batch['values']
valid_values = torch.masked_select(values, response_mask)
return_diff_var = torch.var(valid_returns - valid_values)
return_var = torch.var(valid_returns)
metrics = {
# score
'critic/score/mean':
torch.mean(sequence_score).detach().item(),
'critic/score/max':
torch.max(sequence_score).detach().item(),
'critic/score/min':
torch.min(sequence_score).detach().item(),
# reward
'critic/rewards/mean':
torch.mean(sequence_reward).detach().item(),
'critic/rewards/max':
torch.max(sequence_reward).detach().item(),
'critic/rewards/min':
torch.min(sequence_reward).detach().item(),
# adv
'critic/advantages/mean':
torch.mean(valid_adv).detach().item(),
'critic/advantages/max':
torch.max(valid_adv).detach().item(),
'critic/advantages/min':
torch.min(valid_adv).detach().item(),
# returns
'critic/returns/mean':
torch.mean(valid_returns).detach().item(),
'critic/returns/max':
torch.max(valid_returns).detach().item(),
'critic/returns/min':
torch.min(valid_returns).detach().item(),
**({
# values
'critic/values/mean': torch.mean(valid_values).detach().item(),
'critic/values/max': torch.max(valid_values).detach().item(),
'critic/values/min': torch.min(valid_values).detach().item(),
# vf explained var
'critic/vf_explained_var': (1.0 - return_diff_var / (return_var + 1e-5)).detach().item(),
} if use_critic else {}),
# response length
'response_length/mean':
torch.mean(response_length).detach().item(),
'response_length/max':
torch.max(response_length).detach().item(),
'response_length/min':
torch.min(response_length).detach().item(),
'response_length/clip_ratio':
torch.mean(torch.eq(response_length, max_response_length).float()).detach().item(),
# prompt length
'prompt_length/mean':
torch.mean(prompt_length).detach().item(),
'prompt_length/max':
torch.max(prompt_length).detach().item(),
'prompt_length/min':
torch.min(prompt_length).detach().item(),
'prompt_length/clip_ratio':
torch.mean(torch.eq(prompt_length, max_prompt_length).float()).detach().item(),
}
return metrics
def compute_timing_metrics(batch, timing_raw):
response_info = _compute_response_info(batch)
num_prompt_tokens = torch.sum(response_info['prompt_length']).item()
num_response_tokens = torch.sum(response_info['response_length']).item()
num_overall_tokens = num_prompt_tokens + num_response_tokens
num_tokens_of_section = {
'gen': num_response_tokens,
**{
name: num_overall_tokens for name in ['ref', 'values', 'adv', 'update_critic', 'update_actor']
},
}
return {
**{
f'timing_s/{name}': value for name, value in timing_raw.items()
},
**{
f'timing_per_token_ms/{name}': timing_raw[name] * 1000 / num_tokens_of_section[name] for name in set(num_tokens_of_section.keys(
)) & set(timing_raw.keys())
},
}
@contextmanager
def _timer(name: str, timing_raw: Dict[str, float]):
with Timer(name=name, logger=None) as timer:
yield
timing_raw[name] = timer.last
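# Usage pattern (as in fit() below): wrap each stage of a training step so its wall-clock
# time lands in timing_raw, e.g.
#   with _timer('gen', timing_raw):
#       gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
# compute_timing_metrics later normalizes these timings into per-token latencies.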
class RayPPOTrainer(object):
"""
Note that this trainer runs on the driver process on a single CPU/GPU node.
"""
# TODO: support each role have individual ray_worker_group_cls,
# i.e., support different backend of different role
def __init__(self,
config,
tokenizer,
role_worker_mapping: dict[Role, WorkerType],
resource_pool_manager: ResourcePoolManager,
ray_worker_group_cls: RayWorkerGroup = RayWorkerGroup,
processor=None,
reward_fn=None,
val_reward_fn=None):
# assert torch.cuda.is_available(), 'cuda must be available on driver'
self.tokenizer = tokenizer
self.processor = processor
self.config = config
self.reward_fn = reward_fn
self.val_reward_fn = val_reward_fn
self.hybrid_engine = config.actor_rollout_ref.hybrid_engine
assert self.hybrid_engine, 'Currently, only support hybrid engine'
if self.hybrid_engine:
assert Role.ActorRollout in role_worker_mapping, f'{role_worker_mapping.keys()=}'
self.role_worker_mapping = role_worker_mapping
self.resource_pool_manager = resource_pool_manager
self.use_reference_policy = Role.RefPolicy in role_worker_mapping
self.use_rm = Role.RewardModel in role_worker_mapping
self.ray_worker_group_cls = ray_worker_group_cls
# define KL control
if self.use_reference_policy:
if config.algorithm.kl_ctrl.type == 'fixed':
self.kl_ctrl = core_algos.FixedKLController(kl_coef=config.algorithm.kl_ctrl.kl_coef)
elif config.algorithm.kl_ctrl.type == 'adaptive':
assert config.algorithm.kl_ctrl.horizon > 0, f'horizon must be larger than 0. Got {config.critic.kl_ctrl.horizon}'
self.kl_ctrl = core_algos.AdaptiveKLController(init_kl_coef=config.algorithm.kl_ctrl.kl_coef,
target_kl=config.algorithm.kl_ctrl.target_kl,
horizon=config.algorithm.kl_ctrl.horizon)
else:
raise NotImplementedError
else:
self.kl_ctrl = core_algos.FixedKLController(kl_coef=0.)
if self.config.algorithm.adv_estimator == AdvantageEstimator.GAE:
self.use_critic = True
elif self.config.algorithm.adv_estimator in [
AdvantageEstimator.GRPO, AdvantageEstimator.REINFORCE_PLUS_PLUS, AdvantageEstimator.REMAX,
AdvantageEstimator.RLOO
]:
self.use_critic = False
else:
raise NotImplementedError
self._validate_config()
self._create_dataloader()
def _validate_config(self):
config = self.config
# number of GPUs total
n_gpus = config.trainer.n_gpus_per_node * config.trainer.nnodes
# 1. Check total batch size for data correctness
real_train_batch_size = config.data.train_batch_size * config.actor_rollout_ref.rollout.n
assert real_train_batch_size % n_gpus == 0, \
f"real_train_batch_size ({real_train_batch_size}) must be divisible by total n_gpus ({n_gpus})."
# A helper function to check "micro_batch_size" vs "micro_batch_size_per_gpu"
# We throw an error if the user sets both. The new convention is "..._micro_batch_size_per_gpu".
def check_mutually_exclusive(mbs, mbs_per_gpu, name: str):
if mbs is None and mbs_per_gpu is None:
raise ValueError(f"[{name}] Please set at least one of '{name}.micro_batch_size' or "
f"'{name}.micro_batch_size_per_gpu'.")
if mbs is not None and mbs_per_gpu is not None:
raise ValueError(f"[{name}] You have set both '{name}.micro_batch_size' AND "
f"'{name}.micro_batch_size_per_gpu'. Please remove '{name}.micro_batch_size' "
f"because only '*_micro_batch_size_per_gpu' is supported (the former is deprecated).")
if not config.actor_rollout_ref.actor.use_dynamic_bsz:
# actor: ppo_micro_batch_size vs. ppo_micro_batch_size_per_gpu
check_mutually_exclusive(config.actor_rollout_ref.actor.ppo_micro_batch_size,
config.actor_rollout_ref.actor.ppo_micro_batch_size_per_gpu,
"actor_rollout_ref.actor")
# reference: log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu
check_mutually_exclusive(config.actor_rollout_ref.ref.log_prob_micro_batch_size,
config.actor_rollout_ref.ref.log_prob_micro_batch_size_per_gpu,
"actor_rollout_ref.ref")
# The rollout section also has log_prob_micro_batch_size vs. log_prob_micro_batch_size_per_gpu
check_mutually_exclusive(config.actor_rollout_ref.rollout.log_prob_micro_batch_size,
config.actor_rollout_ref.rollout.log_prob_micro_batch_size_per_gpu,
"actor_rollout_ref.rollout")
if self.use_critic and not config.critic.use_dynamic_bsz:
# Check for critic micro-batch size conflicts
check_mutually_exclusive(config.critic.ppo_micro_batch_size, config.critic.ppo_micro_batch_size_per_gpu,
"critic")
# Check for reward model micro-batch size conflicts
if config.reward_model.enable and not config.reward_model.use_dynamic_bsz:
check_mutually_exclusive(config.reward_model.micro_batch_size, config.reward_model.micro_batch_size_per_gpu,
"reward_model")
# Actor
# if NOT dynamic_bsz, we must ensure:
# ppo_mini_batch_size is divisible by ppo_micro_batch_size
# ppo_micro_batch_size * sequence_parallel_size >= n_gpus
if not config.actor_rollout_ref.actor.use_dynamic_bsz:
sp_size = config.actor_rollout_ref.actor.get('ulysses_sequence_parallel_size', 1)
if config.actor_rollout_ref.actor.ppo_micro_batch_size is not None:
assert config.actor_rollout_ref.actor.ppo_mini_batch_size % config.actor_rollout_ref.actor.ppo_micro_batch_size == 0
assert config.actor_rollout_ref.actor.ppo_micro_batch_size * sp_size >= n_gpus
# critic
if self.use_critic and not config.critic.use_dynamic_bsz:
sp_size = config.critic.get('ulysses_sequence_parallel_size', 1)
if config.critic.ppo_micro_batch_size is not None:
assert config.critic.ppo_mini_batch_size % config.critic.ppo_micro_batch_size == 0
assert config.critic.ppo_micro_batch_size * sp_size >= n_gpus
# Check if use_remove_padding is enabled when using sequence parallelism for fsdp
if config.actor_rollout_ref.actor.strategy == 'fsdp':
if config.actor_rollout_ref.actor.get('ulysses_sequence_parallel_size', 1) > 1 or \
config.actor_rollout_ref.ref.get('ulysses_sequence_parallel_size', 1) > 1:
assert config.actor_rollout_ref.model.use_remove_padding, \
"When using sequence parallelism for actor/ref policy, you must enable `use_remove_padding`."
if self.use_critic and config.critic.strategy == 'fsdp':
if config.critic.get('ulysses_sequence_parallel_size', 1) > 1:
assert config.critic.model.use_remove_padding, \
"When using sequence parallelism for critic, you must enable `use_remove_padding`."
if config.data.get('val_batch_size', None) is not None:
print(
f"WARNING: val_batch_size is deprecated. Validation datasets are sent to inference engines as a whole batch, which will schedule the memory themselves."
)
print("[validate_config] All configuration checks passed successfully!")
def _create_dataloader(self):
# TODO: we have to make sure the batch size is divisible by the dp size
self.train_dataset = RLHFDataset(parquet_files=self.config.data.train_files,
tokenizer=self.tokenizer,
processor=self.processor,
prompt_key=self.config.data.prompt_key,
image_key=self.config.data.get('image_key', 'images'),
max_prompt_length=self.config.data.max_prompt_length,
filter_prompts=True,
return_raw_chat=self.config.data.get('return_raw_chat', False),
truncation='error',
template_type=self.config.data.template_type)
# use sampler for better ckpt resume
if self.config.data.shuffle:
train_dataloader_generator = torch.Generator()
train_dataloader_generator.manual_seed(self.config.data.get('seed', 1))
sampler = RandomSampler(data_source=self.train_dataset, generator=train_dataloader_generator)
else:
sampler = SequentialSampler(data_source=self.train_dataset)
self.train_dataloader = StatefulDataLoader(dataset=self.train_dataset,
batch_size=self.config.data.train_batch_size,
num_workers=8,
drop_last=True,
collate_fn=collate_fn,
sampler=sampler)
self.val_dataset = RLHFDataset(parquet_files=self.config.data.val_files,
tokenizer=self.tokenizer,
processor=self.processor,
prompt_key=self.config.data.prompt_key,
image_key=self.config.data.get('image_key', 'images'),
max_prompt_length=self.config.data.val_max_prompt_length,
filter_prompts=True,
return_raw_chat=self.config.data.get('return_raw_chat', False),
truncation='error',
template_type=self.config.data.template_type)
self.val_dataloader = StatefulDataLoader(
dataset=self.val_dataset,
# Validation datasets are sent to inference engines as a whole batch,
# and the engines schedule memory themselves.
batch_size=len(self.val_dataset),
num_workers=8,
shuffle=False,
drop_last=False,
collate_fn=collate_fn)
assert len(self.train_dataloader) >= 1
assert len(
self.val_dataloader
) == 1, "Validation dataloader must have a single batch; inference engines schedule memory themselves."
print(f'Size of train dataloader: {len(self.train_dataloader)}')
# inject total_training_steps to actor/critic optim_config. This is hacky.
total_training_steps = len(self.train_dataloader) * self.config.trainer.total_epochs
if self.config.trainer.total_training_steps is not None:
total_training_steps = self.config.trainer.total_training_steps
self.total_training_steps = total_training_steps
print(f'Total training steps: {self.total_training_steps}')
OmegaConf.set_struct(self.config, True)
with open_dict(self.config):
self.config.actor_rollout_ref.actor.optim.total_training_steps = total_training_steps
self.config.critic.optim.total_training_steps = total_training_steps
def _maybe_log_val_generations_to_wandb(self, inputs, outputs, scores):
"""Log a table of validation samples to wandb"""
generations_to_log = self.config.trainer.val_generations_to_log_to_wandb
if generations_to_log == 0:
return
if generations_to_log > 0 and 'wandb' not in self.config.trainer.logger:
print(
'WARNING: `val_generations_to_log_to_wandb` is set to a positive value, but no wandb logger is found. ')
return
import wandb
import numpy as np
# Create tuples of (input, output, score) and sort by input text
samples = list(zip(inputs, outputs, scores))
samples.sort(key=lambda x: x[0]) # Sort by input text
# Use fixed random seed for deterministic shuffling
rng = np.random.RandomState(42)
rng.shuffle(samples)
# Take first N samples after shuffling
samples = samples[:generations_to_log]
# Create column names for all samples
columns = ["step"] + sum([[f"input_{i+1}", f"output_{i+1}", f"score_{i+1}"] for i in range(len(samples))], [])
if not hasattr(self, 'validation_table'):
# Initialize the table on first call
self.validation_table = wandb.Table(columns=columns)
# Create a new table with same columns and existing data
# Workaround for https://github.com/wandb/wandb/issues/2981#issuecomment-1997445737
new_table = wandb.Table(columns=columns, data=self.validation_table.data)
# Add new row with all data
row_data = []
row_data.append(self.global_steps)
for sample in samples:
row_data.extend(sample)
new_table.add_data(*row_data)
# Update reference and log
wandb.log({"val/generations": new_table}, step=self.global_steps)
self.validation_table = new_table
def _validate(self):
reward_tensor_lst = []
data_source_lst = []
# Lists to collect samples for the table
sample_inputs = []
sample_outputs = []
sample_scores = []
sample_lengths = []
for test_data in self.val_dataloader:
test_batch = DataProto.from_single_dict(test_data)
# we only do validation on rule-based rm
if self.config.reward_model.enable and test_batch[0].non_tensor_batch['reward_model']['style'] == 'model':
return {}
# Store original inputs
input_ids = test_batch.batch['input_ids']
input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]
sample_inputs.extend(input_texts)
if 'multi_modal_inputs' in test_batch.non_tensor_batch.keys():
test_gen_batch = test_batch.pop(
batch_keys=['input_ids', 'attention_mask', 'position_ids'],
non_tensor_batch_keys=['raw_prompt_ids', 'multi_modal_data', 'multi_modal_inputs'],
)
else:
test_gen_batch = test_batch.pop(
batch_keys=['input_ids', 'attention_mask', 'position_ids'],
non_tensor_batch_keys=['raw_prompt_ids'],
)
test_gen_batch.meta_info = {
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
'recompute_log_prob': False,
'do_sample': False,
'validate': True,
}
# pad to be divisible by dp_size
test_gen_batch_padded, pad_size = pad_dataproto_to_divisor(test_gen_batch, self.actor_rollout_wg.world_size)
test_output_gen_batch_padded = self.actor_rollout_wg.generate_sequences(test_gen_batch_padded)
# unpad
test_output_gen_batch = unpad_dataproto(test_output_gen_batch_padded, pad_size=pad_size)
print('validation generation end')
# Store generated outputs
output_ids = test_output_gen_batch.batch['responses']
output_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in output_ids]
sample_outputs.extend(output_texts)
sample_lengths.extend((~torch.isin(output_ids, torch.tensor(self.tokenizer.pad_token_id, device=output_ids.device))).sum(-1).tolist())
test_batch = test_batch.union(test_output_gen_batch)
# evaluate using reward_function
reward_tensor = self.val_reward_fn(test_batch)
reward_tensor = torch.where((reward_tensor == -1.0) | (reward_tensor == -0.5), torch.tensor(0.0), reward_tensor)
test_batch.batch['token_level_scores']=reward_tensor
self._save_samples(test_batch, "test")
# Store scores
scores = reward_tensor.sum(-1).cpu().tolist()
sample_scores.extend(scores)
reward_tensor_lst.append(reward_tensor)
data_source_lst.append(test_batch.non_tensor_batch.get('data_source', ['unknown'] * reward_tensor.shape[0]))
self._maybe_log_val_generations_to_wandb(inputs=sample_inputs, outputs=sample_outputs, scores=sample_scores)
reward_tensor = torch.cat(reward_tensor_lst, dim=0).sum(-1).cpu() # (batch_size,)
data_sources = np.concatenate(data_source_lst, axis=0)
# evaluate test_score based on data source
data_source_reward = {}
data_source_length = {}
for i in range(reward_tensor.shape[0]):
data_source = data_sources[i]
if data_source not in data_source_reward:
data_source_reward[data_source] = []
data_source_length[data_source] = []
data_source_reward[data_source].append(reward_tensor[i].item())
data_source_length[data_source].append(sample_lengths[i])
metric_dict = {}
average_acc=[]
for data_source, rewards in data_source_reward.items():
metric_dict[f'val/test_score/{data_source}'] = np.mean(rewards)
average_acc.append(np.mean(rewards))
metric_dict['val/test_score/avg_acc'] = np.mean(average_acc)
for data_source, lengths in data_source_length.items():
metric_dict[f'val_response_length/test_length/{data_source}'] = np.mean(lengths)
rewards=data_source_reward[data_source]
if np.mean(rewards)==0: continue
metric_dict[f'val_response_length/test_length_correct/{data_source}'] = sum([lengths[i] for i in range(len(lengths)) if rewards[i]==1.0])/sum(rewards)
return metric_dict
def _save_samples(self, batch: DataProto, split: str):
prompts, response, token_level_scores=batch.batch['prompts'], batch.batch['responses'], batch.batch['token_level_scores']
sources=batch.non_tensor_batch['data_source']
prompts=self.tokenizer.batch_decode(prompts, skip_special_tokens=True)
response=self.tokenizer.batch_decode(response, skip_special_tokens=True)
rewards=[]
for row in token_level_scores:
non_zero_indices = torch.nonzero(row, as_tuple=True)[0]
if len(non_zero_indices) > 0: rewards.append( row[non_zero_indices[-1]].item() )
else: rewards.append(0.0)
data=[]
for idx in range(len(prompts)):
data.append({
'prompt': prompts[idx],
'response': response[idx],
'reward': rewards[idx],
'source': sources[idx],
})
os.makedirs(os.path.join(self.config.trainer.samples_save_path, split), exist_ok=True)
with open(os.path.join(self.config.trainer.samples_save_path, split, f"step_{self.global_steps}.json"), 'a+', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def init_workers(self):
"""Init resource pool and worker group"""
self.resource_pool_manager.create_resource_pool()
self.resource_pool_to_cls = {pool: {} for pool in self.resource_pool_manager.resource_pool_dict.values()}
# create actor and rollout
if self.hybrid_engine:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.ActorRollout)
actor_rollout_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.ActorRollout],
config=self.config.actor_rollout_ref,
role='actor_rollout')
self.resource_pool_to_cls[resource_pool]['actor_rollout'] = actor_rollout_cls
else:
raise NotImplementedError
# create critic
if self.use_critic:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.Critic)
critic_cls = RayClassWithInitArgs(cls=self.role_worker_mapping[Role.Critic], config=self.config.critic)
self.resource_pool_to_cls[resource_pool]['critic'] = critic_cls
# create reference policy if needed
if self.use_reference_policy:
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RefPolicy)
ref_policy_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RefPolicy],
config=self.config.actor_rollout_ref,
role='ref')
self.resource_pool_to_cls[resource_pool]['ref'] = ref_policy_cls
# create a reward model if reward_fn is None
if self.use_rm:
# we create a RM here
resource_pool = self.resource_pool_manager.get_resource_pool(Role.RewardModel)
rm_cls = RayClassWithInitArgs(self.role_worker_mapping[Role.RewardModel], config=self.config.reward_model)
self.resource_pool_to_cls[resource_pool]['rm'] = rm_cls
# initialize WorkerGroup
# NOTE: if you want to use a different resource pool for each role, which can support different parallel size,
# you should not use `create_colocated_worker_cls`. Instead, directly pass different resource pool to different worker groups.
# See https://github.com/volcengine/verl/blob/master/examples/ray/tutorial.ipynb for more information.
all_wg = {}
self.wg_dicts = []
for resource_pool, class_dict in self.resource_pool_to_cls.items():
worker_dict_cls = create_colocated_worker_cls(class_dict=class_dict)
wg_dict = self.ray_worker_group_cls(resource_pool=resource_pool, ray_cls_with_init=worker_dict_cls)
spawn_wg = wg_dict.spawn(prefix_set=class_dict.keys())
all_wg.update(spawn_wg)
# keep the reference of WorkerDict to support ray >= 2.31. Ref: https://github.com/ray-project/ray/pull/45699
self.wg_dicts.append(wg_dict)
if self.use_critic:
self.critic_wg = all_wg['critic']
self.critic_wg.init_model()
if self.use_reference_policy:
self.ref_policy_wg = all_wg['ref']
self.ref_policy_wg.init_model()
if self.use_rm:
self.rm_wg = all_wg['rm']
self.rm_wg.init_model()
# we should create rollout at the end so that vllm can have a better estimation of kv cache memory
self.actor_rollout_wg = all_wg['actor_rollout']
self.actor_rollout_wg.init_model()
def _save_checkpoint(self):
# path: given_path + `/global_step_{global_steps}` + `/actor`
local_global_step_folder = os.path.join(self.config.trainer.default_local_dir,
f'global_step_{self.global_steps}')
actor_local_path = os.path.join(local_global_step_folder, 'actor')
actor_remote_path = None if self.config.trainer.default_hdfs_dir is None else os.path.join(
self.config.trainer.default_hdfs_dir, f'global_step_{self.global_steps}', 'actor')
self.actor_rollout_wg.save_checkpoint(actor_local_path,
actor_remote_path,
self.global_steps,
remove_previous_ckpt=self.config.trainer.remove_previous_ckpt_in_save)
if self.use_critic:
critic_local_path = os.path.join(local_global_step_folder, 'critic')
critic_remote_path = None if self.config.trainer.default_hdfs_dir is None else os.path.join(
self.config.trainer.default_hdfs_dir, f'global_step_{self.global_steps}', 'critic')
self.critic_wg.save_checkpoint(critic_local_path,
critic_remote_path,
self.global_steps,
remove_previous_ckpt=self.config.trainer.remove_previous_ckpt_in_save)
# save dataloader
dataloader_local_path = os.path.join(local_global_step_folder, 'data.pt')
dataloader_state_dict = self.train_dataloader.state_dict()
torch.save(dataloader_state_dict, dataloader_local_path)
# latest checkpointed iteration tracker (for atomic usage)
local_latest_checkpointed_iteration = os.path.join(self.config.trainer.default_local_dir,
'latest_checkpointed_iteration.txt')
with open(local_latest_checkpointed_iteration, 'w') as f:
f.write(str(self.global_steps))
def _load_checkpoint(self):
if self.config.trainer.resume_mode == 'disable':
return 0
# load from hdfs
if self.config.trainer.default_hdfs_dir is not None:
raise NotImplementedError('load from hdfs is not implemented yet')
else:
checkpoint_folder = self.config.trainer.default_local_dir # TODO: check path
if not os.path.isabs(checkpoint_folder):
working_dir = os.getcwd()
checkpoint_folder = os.path.join(working_dir, checkpoint_folder)
global_step_folder = find_latest_ckpt_path(checkpoint_folder) # None if no latest
# find global_step_folder
if self.config.trainer.resume_mode == 'auto':
if global_step_folder is None:
print('Training from scratch')
return 0
else:
if not (self.config.trainer.resume_from_path and global_step_folder is not None):
assert isinstance(self.config.trainer.resume_mode, str), "resume ckpt must be str type"
assert 'global_step_' in self.config.trainer.resume_mode, "resume ckpt must specify the global_steps"
global_step_folder = self.config.trainer.resume_mode
if not os.path.isabs(global_step_folder):
working_dir = os.getcwd()
global_step_folder = os.path.join(working_dir, global_step_folder)
print(f'Load from checkpoint folder: {global_step_folder}')
# set global step
self.global_steps = int(global_step_folder.split('global_step_')[-1])
print(f'Setting global step to {self.global_steps}')
print(f'Resuming from {global_step_folder}')
actor_path = os.path.join(global_step_folder, 'actor')
critic_path = os.path.join(global_step_folder, 'critic')
# load actor
self.actor_rollout_wg.load_checkpoint(actor_path,
del_local_after_load=self.config.trainer.del_local_ckpt_after_load)
# load critic
if self.use_critic:
self.critic_wg.load_checkpoint(critic_path,
del_local_after_load=self.config.trainer.del_local_ckpt_after_load)
# load dataloader
# TODO: from remote not implemented yet
dataloader_local_path = os.path.join(global_step_folder, 'data.pt')
if os.path.exists(dataloader_local_path):
dataloader_state_dict = torch.load(dataloader_local_path)
self.train_dataloader.load_state_dict(dataloader_state_dict)
else:
print(f"Warning: No dataloader state found at {dataloader_local_path}, will start from scratch")
def _balance_batch(self, batch: DataProto, metrics, logging_prefix='global_seqlen'):
"""Reorder the data on single controller such that each dp rank gets similar total tokens"""
attention_mask = batch.batch['attention_mask']
batch_size = attention_mask.shape[0]
global_seqlen_lst = batch.batch['attention_mask'].view(batch_size, -1).sum(-1).tolist() # (train_batch_size,)
world_size = self.actor_rollout_wg.world_size
global_partition_lst = get_seqlen_balanced_partitions(global_seqlen_lst,
k_partitions=world_size,
equal_size=True)
# reorder based on index. The data will be automatically equally partitioned by dispatch function
global_idx = torch.tensor([j for partition in global_partition_lst for j in partition])
batch.reorder(global_idx)
global_balance_stats = log_seqlen_unbalance(seqlen_list=global_seqlen_lst,
partitions=global_partition_lst,
prefix=logging_prefix)
metrics.update(global_balance_stats)
def fit(self):
"""
The training loop of PPO.
The driver process only needs to call the compute functions of the worker group through RPC to construct the PPO dataflow.
The lightweight advantage computation is done on the driver process.
"""
from verl.utils.tracking import Tracking
from omegaconf import OmegaConf
logger = Tracking(project_name=self.config.trainer.project_name,
experiment_name=self.config.trainer.experiment_name,
default_backend=self.config.trainer.logger,
config=OmegaConf.to_container(self.config, resolve=True))
self.global_steps = 0
# load checkpoint before doing anything
self._load_checkpoint()
# perform validation before training
# currently, we only support validation using the reward_function.
if self.val_reward_fn is not None and self.config.trainer.get('val_before_train', True):
val_metrics = self._validate()
pprint(f'Initial validation metrics: {val_metrics}')
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.get('val_only', False):
return
# we start from step 1
self.global_steps += 1
for epoch in range(self.config.trainer.total_epochs):
for batch_dict in self.train_dataloader:
metrics = {}
timing_raw = {}
batch: DataProto = DataProto.from_single_dict(batch_dict)
# pop those keys for generation
if 'multi_modal_inputs' in batch.non_tensor_batch.keys():
gen_batch = batch.pop(
batch_keys=['input_ids', 'attention_mask', 'position_ids'],
non_tensor_batch_keys=['raw_prompt_ids', 'multi_modal_data', 'multi_modal_inputs'],
)
else:
gen_batch = batch.pop(
batch_keys=['input_ids', 'attention_mask', 'position_ids'],
non_tensor_batch_keys=['raw_prompt_ids'],
)
with _timer('step', timing_raw):
# generate a batch
with _timer('gen', timing_raw):
gen_batch_output = self.actor_rollout_wg.generate_sequences(gen_batch)
if self.config.algorithm.adv_estimator == AdvantageEstimator.REMAX:
with _timer('gen_max', timing_raw):
gen_baseline_batch = deepcopy(gen_batch)
gen_baseline_batch.meta_info['do_sample'] = False
gen_baseline_output = self.actor_rollout_wg.generate_sequences(gen_baseline_batch)
batch = batch.union(gen_baseline_output)
reward_baseline_tensor = self.reward_fn(batch)
reward_baseline_tensor = reward_baseline_tensor.sum(dim=-1)
batch.pop(batch_keys=list(gen_baseline_output.batch.keys()))
batch.batch['reward_baselines'] = reward_baseline_tensor
del gen_baseline_batch, gen_baseline_output
batch.non_tensor_batch['uid'] = np.array([str(uuid.uuid4()) for _ in range(len(batch.batch))],
dtype=object)
# repeat to align with repeated responses in rollout
batch = batch.repeat(repeat_times=self.config.actor_rollout_ref.rollout.n, interleave=True)
batch = batch.union(gen_batch_output)
# balance the number of valid tokens on each dp rank.
# Note that this breaks the order of data inside the batch.
# Please take care when you implement group-based adv computation such as GRPO and RLOO.
self._balance_batch(batch, metrics=metrics)
# compute global_valid tokens
batch.meta_info['global_token_num'] = torch.sum(batch.batch['attention_mask'], dim=-1).tolist()
# recompute old_log_probs
with _timer('old_log_prob', timing_raw):
old_log_prob = self.actor_rollout_wg.compute_log_prob(batch)
batch = batch.union(old_log_prob)
if self.use_reference_policy:
# compute reference log_prob
with _timer('ref', timing_raw):
ref_log_prob = self.ref_policy_wg.compute_ref_log_prob(batch)
batch = batch.union(ref_log_prob)
# compute values
if self.use_critic:
with _timer('values', timing_raw):
values = self.critic_wg.compute_values(batch)
batch = batch.union(values)
with _timer('adv', timing_raw):
# compute scores. Support both model and function-based.
# We first compute the scores using reward model. Then, we call reward_fn to combine
# the results from reward model and rule-based results.
if self.use_rm:
# we first compute reward model score
reward_tensor = self.rm_wg.compute_rm_score(batch)
batch = batch.union(reward_tensor)
# we combine with rule-based rm
reward_tensor = self.reward_fn(batch)
batch.batch['token_level_scores'] = reward_tensor
# compute rewards. apply_kl_penalty if available
if not self.config.actor_rollout_ref.actor.get('use_kl_loss', False):
batch, kl_metrics = apply_kl_penalty(batch,
kl_ctrl=self.kl_ctrl,
kl_penalty=self.config.algorithm.kl_penalty)
metrics.update(kl_metrics)
else:
batch.batch['token_level_rewards'] = batch.batch['token_level_scores']
# compute advantages, executed on the driver process
batch = compute_advantage(batch,
adv_estimator=self.config.algorithm.adv_estimator,
gamma=self.config.algorithm.gamma,
lam=self.config.algorithm.lam,
num_repeat=self.config.actor_rollout_ref.rollout.n)
# update critic
if self.use_critic:
with _timer('update_critic', timing_raw):
critic_output = self.critic_wg.update_critic(batch)
critic_output_metrics = reduce_metrics(critic_output.meta_info['metrics'])
metrics.update(critic_output_metrics)
# implement critic warmup
if self.config.trainer.critic_warmup <= self.global_steps:
# update actor
with _timer('update_actor', timing_raw):
actor_output = self.actor_rollout_wg.update_actor(batch)
actor_output_metrics = reduce_metrics(actor_output.meta_info['metrics'])
metrics.update(actor_output_metrics)
# validate
if self.val_reward_fn is not None and self.config.trainer.test_freq > 0 and \
self.global_steps % self.config.trainer.test_freq == 0:
with _timer('testing', timing_raw):
val_metrics: dict = self._validate()
metrics.update(val_metrics)
if self.config.trainer.save_freq > 0 and \
self.global_steps % self.config.trainer.save_freq == 0:
with _timer('save_checkpoint', timing_raw):
self._save_checkpoint()
# collect metrics
self._save_samples(batch, split="train")
metrics.update(compute_data_metrics(batch=batch, use_critic=self.use_critic))
metrics.update(compute_timing_metrics(batch=batch, timing_raw=timing_raw))
# TODO: make a canonical logger that supports various backend
logger.log(data=metrics, step=self.global_steps)
self.global_steps += 1
if self.global_steps >= self.total_training_steps:
# perform validation after training
if self.val_reward_fn is not None:
val_metrics = self._validate()
pprint(f'Final validation metrics: {val_metrics}')
logger.log(data=val_metrics, step=self.global_steps)
if self.config.trainer.save_freq > 0 and \
(self.global_steps - 1) % self.config.trainer.save_freq != 0:
with _timer('save_checkpoint', timing_raw):
self._save_checkpoint()
return
================================================
FILE: verl/trainer/runtime_env.yaml
================================================
working_dir: ./
excludes: ["/.git/"]
env_vars:
TORCH_NCCL_AVOID_RECORD_STREAMS: "1"
VLLM_ATTENTION_BACKEND: "XFORMERS"
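Usage note (illustrative, not part of the repository): a runtime-env YAML like the one above can be loaded and handed to Ray when the driver starts; `ray.init(runtime_env=...)` then propagates `working_dir`, `excludes`, and `env_vars` to every worker.

```python
# A minimal sketch, assuming PyYAML is installed; the path points at the file above.
import yaml
import ray

with open("verl/trainer/runtime_env.yaml") as f:
    runtime_env = yaml.safe_load(f)

# Ray ships the working dir to workers and exports the env vars in each of them.
ray.init(runtime_env=runtime_env)
```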
================================================
FILE: verl/utils/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import tokenizer
from .tokenizer import hf_tokenizer, hf_processor
__all__ = tokenizer.__all__
================================================
FILE: verl/utils/checkpoint/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/utils/checkpoint/checkpoint_manager.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import hashlib
import shutil
from filelock import FileLock
import tempfile
from typing import Union
import torch
import torch.distributed
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from transformers import PreTrainedTokenizer, ProcessorMixin
import numpy as np
import random
class BaseCheckpointManager:
"""
A checkpoint manager that saves and loads
- model
- optimizer
- lr_scheduler
- extra_states
in a SPMD way.
We save
- sharded model states and optimizer states
- full lr_scheduler states
- huggingface tokenizer and config for ckpt merge
"""
def __init__(self, model: FSDP, optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler.LRScheduler, processing_class: Union[PreTrainedTokenizer,
ProcessorMixin]):
self.previous_global_step = None
self.previous_save_local_path = None
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
self.processing_class = processing_class
assert isinstance(self.model, FSDP)
self.rank = torch.distributed.get_rank()
self.world_size = torch.distributed.get_world_size()
def load_checkpoint(self, *args, **kwargs):
raise NotImplementedError
def save_checkpoint(self, *args, **kwargs):
raise NotImplementedError
def remove_previous_save_local_path(self):
if not self.previous_save_local_path:
return
abs_path = os.path.abspath(self.previous_save_local_path)
print(f'Checkpoint manager remove previous save local path: {abs_path}')
if not os.path.exists(abs_path):
return
# remove previous local_path
shutil.rmtree(abs_path, ignore_errors=True)
@staticmethod
def local_mkdir(path):
if not os.path.isabs(path):
working_dir = os.getcwd()
path = os.path.join(working_dir, path)
        # Use an md5 hash of the path as the lock file name: it stays short and,
        # unlike the builtin hash() (which is salted per process), it is stable across processes
        lock_filename = f"ckpt_{hashlib.md5(path.encode()).hexdigest()[:8]}.lock"
lock_path = os.path.join(tempfile.gettempdir(), lock_filename)
try:
with FileLock(lock_path, timeout=60): # Add timeout
# make a new dir
os.makedirs(path, exist_ok=True)
except Exception as e:
print(f"Warning: Failed to acquire lock for {path}: {e}")
# Even if the lock is not acquired, try to create the directory
os.makedirs(path, exist_ok=True)
return path
@staticmethod
def get_rng_state():
rng_state = {
'cpu': torch.get_rng_state(),
'cuda': torch.cuda.get_rng_state(),
'numpy': np.random.get_state(),
'random': random.getstate(),
}
return rng_state
@staticmethod
def load_rng_state(rng_state):
torch.set_rng_state(rng_state['cpu'])
torch.cuda.set_rng_state(rng_state['cuda'])
np.random.set_state(rng_state['numpy'])
random.setstate(rng_state['random'])
def find_latest_ckpt_path(path, directory_format="global_step_{}"):
if path is None:
return None
tracker_file = get_checkpoint_tracker_filename(path)
if not os.path.exists(tracker_file):
print("Checkpoint tracker file does not exist: %s", tracker_file)
return None
with open(tracker_file, "rb") as f:
iteration = int(f.read().decode())
ckpt_path = os.path.join(path, directory_format.format(iteration))
if not os.path.exists(ckpt_path):
print("Checkpoint does not exist: %s", ckpt_path)
return None
print("Found checkpoint: %s", ckpt_path)
return ckpt_path
def get_checkpoint_tracker_filename(root_path: str):
"""
    Tracker file records the latest checkpoint during training to restart from.
"""
return os.path.join(root_path, "latest_checkpointed_iteration.txt")
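Usage note (illustrative sketch): `find_latest_ckpt_path` resolves the resume directory through the tracker file written at save time. Assuming a local `./ckpts` root where step 100 was the last save:

```python
import os

from verl.utils.checkpoint.checkpoint_manager import (find_latest_ckpt_path,
                                                      get_checkpoint_tracker_filename)

root = "./ckpts"
os.makedirs(os.path.join(root, "global_step_100"), exist_ok=True)
# The tracker file stores only the latest saved iteration number.
with open(get_checkpoint_tracker_filename(root), "w") as f:
    f.write("100")

print(find_latest_ckpt_path(root))  # ./ckpts/global_step_100
```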
================================================
FILE: verl/utils/checkpoint/fsdp_checkpoint_manager.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ray
import os
import warnings
from typing import Union
import torch
import torch.distributed
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType
from torch.distributed.fsdp import ShardedStateDictConfig, ShardedOptimStateDictConfig
from verl.utils.fs import copy_to_local, is_non_local
from transformers import PreTrainedTokenizer, ProcessorMixin
from .checkpoint_manager import BaseCheckpointManager
class FSDPCheckpointManager(BaseCheckpointManager):
"""
A checkpoint manager that saves and loads
- model
- optimizer
- lr_scheduler
- extra_states
in a SPMD way.
We save
- sharded model states and optimizer states
- full lr_scheduler states
- huggingface tokenizer/processor and config for ckpt merge
"""
def __init__(self,
model: FSDP,
optimizer: torch.optim.Optimizer,
lr_scheduler: torch.optim.lr_scheduler.LRScheduler,
processing_class: Union[PreTrainedTokenizer, ProcessorMixin] = None,
**kwargs):
if processing_class is None:
assert "tokenizer" in kwargs, "tokenizer or processor must be provided"
warnings.warn("`tokenizer` is deprecated. use `processing_class` instead.", DeprecationWarning)
processing_class = kwargs.pop("tokenizer")
super().__init__(model, optimizer, lr_scheduler, processing_class)
def load_checkpoint(self, path=None, del_local_after_load=False, *args, **kwargs):
if path is None:
return
# every rank download its own checkpoint
remote_model_path = os.path.join(path, f'model_world_size_{self.world_size}_rank_{self.rank}.pt')
remote_optim_path = os.path.join(path, f'optim_world_size_{self.world_size}_rank_{self.rank}.pt')
remote_extra_state_path = os.path.join(path, f'extra_state_world_size_{self.world_size}_rank_{self.rank}.pt')
print(
f'[rank-{self.rank}]: Loading from {remote_model_path} and {remote_optim_path} and {remote_extra_state_path}'
)
local_model_path = copy_to_local(remote_model_path)
local_optim_path = copy_to_local(remote_optim_path)
local_extra_state_path = copy_to_local(remote_extra_state_path)
model_state_dict = torch.load(local_model_path)
optimizer_state_dict = torch.load(local_optim_path)
extra_state_dict = torch.load(local_extra_state_path)
if del_local_after_load:
try:
os.remove(local_model_path) if is_non_local(local_model_path) else None
os.remove(local_optim_path) if is_non_local(local_optim_path) else None
os.remove(local_extra_state_path) if is_non_local(local_extra_state_path) else None
except Exception as e:
print(
f'[rank-{self.rank}]: remove local resume ckpt file after loading failed, exception {e} will be ignored'
)
lr_scheduler_state_dict = extra_state_dict['lr_scheduler']
state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True)
optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True)
with FSDP.state_dict_type(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
self.model.load_state_dict(model_state_dict)
if self.optimizer is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
# recover random state
if 'rng' in extra_state_dict:
# 'rng' may not exist for backward compatibility
self.load_rng_state(extra_state_dict['rng'])
if self.lr_scheduler is not None:
self.lr_scheduler.load_state_dict(lr_scheduler_state_dict)
def save_checkpoint(self, local_path: str, global_step: int, remove_previous_ckpt=False, *args, **kwargs):
# record the previous global step
self.previous_global_step = global_step
# remove previous local_path
# TODO: shall we remove previous ckpt every save?
if remove_previous_ckpt:
self.remove_previous_save_local_path()
local_path = self.local_mkdir(local_path)
torch.distributed.barrier()
# every rank will save its own model and optim shard
state_dict_cfg = ShardedStateDictConfig(offload_to_cpu=True)
optim_cfg = ShardedOptimStateDictConfig(offload_to_cpu=True)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with FSDP.state_dict_type(self.model, StateDictType.SHARDED_STATE_DICT, state_dict_cfg, optim_cfg):
model_state_dict = self.model.state_dict()
if self.optimizer is not None:
optimizer_state_dict = self.optimizer.state_dict()
else:
optimizer_state_dict = None
if self.lr_scheduler is not None:
lr_scheduler_state_dict = self.lr_scheduler.state_dict()
else:
lr_scheduler_state_dict = None
extra_state_dict = {
'lr_scheduler': lr_scheduler_state_dict,
'rng': self.get_rng_state(),
}
model_path = os.path.join(local_path, f'model_world_size_{self.world_size}_rank_{self.rank}.pt')
optim_path = os.path.join(local_path, f'optim_world_size_{self.world_size}_rank_{self.rank}.pt')
extra_path = os.path.join(local_path, f'extra_state_world_size_{self.world_size}_rank_{self.rank}.pt')
print(f'[rank-{self.rank}]: Saving model to {os.path.abspath(model_path)}')
        print(f'[rank-{self.rank}]: Saving optim to {os.path.abspath(optim_path)}')
print(f'[rank-{self.rank}]: Saving extra_state to {os.path.abspath(extra_path)}')
torch.save(model_state_dict, model_path)
torch.save(optimizer_state_dict, optim_path) # TODO: address optimizer is None
torch.save(extra_state_dict, extra_path)
# wait for everyone to dump to local
torch.distributed.barrier()
if self.rank == 0:
hf_local_path = os.path.join(local_path, 'huggingface')
os.makedirs(hf_local_path, exist_ok=True)
self.model._fsdp_wrapped_module.config.save_pretrained(hf_local_path)
self.processing_class.save_pretrained(hf_local_path)
torch.distributed.barrier()
self.previous_save_local_path = local_path
================================================
FILE: verl/utils/config.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from omegaconf import DictConfig
def update_dict_with_config(dictionary: Dict, config: DictConfig):
for key in dictionary:
if hasattr(config, key):
dictionary[key] = getattr(config, key)
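Usage note (illustrative): only keys already present in the dict are updated; extra config attributes are ignored.

```python
from omegaconf import OmegaConf

from verl.utils.config import update_dict_with_config

defaults = {"lr": 1e-5, "batch_size": 32}
overrides = OmegaConf.create({"lr": 3e-6, "batch_size": 64, "unused": 1})
update_dict_with_config(defaults, overrides)
print(defaults)  # {'lr': 3e-06, 'batch_size': 64}  ('unused' is ignored)
```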
================================================
FILE: verl/utils/dataset/README.md
================================================
# Dataset Format
## RLHF dataset
We combine all the data sources into a single parquet file. We organize the prompt directly in the chat format so that multi-turn chats can be easily incorporated. In the prompt, we may add instruction-following text to guide the model to output the answer in a particular format so that we can extract it.
Math problems
```json
{
"data_source": "openai/gsm8k",
"prompt": [{"role": "user", "content": "Natalia sold clips to 48 of her friends in April, and then she sold half as many clips in May. How many clips did Natalia sell altogether in April and May? Let's think step by step and output the final answer after \"####\""}],
"ability": "math",
"reward_model": {
"style": "rule",
"ground_truth": ["72"]
},
}
```
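Such records can be produced with pandas; a minimal sketch (the output file name is illustrative):

```python
import pandas as pd

row = {
    "data_source": "openai/gsm8k",
    "prompt": [{"role": "user", "content": "What is 2 + 2? Output the final answer after \"####\""}],
    "ability": "math",
    "reward_model": {"style": "rule", "ground_truth": ["4"]},
}
pd.DataFrame([row]).to_parquet("train.parquet")  # requires pyarrow or fastparquet
```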
================================================
FILE: verl/utils/dataset/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .rl_dataset import RLHFDataset
from .rm_dataset import RMDataset
from .sft_dataset import SFTDataset
================================================
FILE: verl/utils/dataset/rl_dataset.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import ListConfig
import os
from typing import List, Union, Optional
import copy
import pandas as pd
from collections import defaultdict
import torch
import numpy as np
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, ProcessorMixin
from verl.utils.model import compute_position_id_with_mask
import verl.utils.torch_functional as verl_F
tir_base_0307='''A conversation between User and Assistant. The user asks a question, and the Assistant solves it. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. User: Please integrate natural language reasoning with programs to solve the problem above, and put your final answer within \\boxed{}.\n[PROMPT]\nAssistant:
'''
tir_base_0309='''A conversation between User and Assistant. The user asks a question, and the Assistant solves it.\nUser: Please integrate natural language reasoning with programs to solve the problem above, and put your final answer within \\boxed{}.\n[PROMPT]\nAssistant:
'''
def collate_fn(data_list: list[dict]) -> dict:
tensors = defaultdict(list)
non_tensors = defaultdict(list)
for data in data_list:
for key, val in data.items():
if isinstance(val, torch.Tensor):
tensors[key].append(val)
else:
non_tensors[key].append(val)
for key, val in tensors.items():
tensors[key] = torch.stack(val, dim=0)
for key, val in non_tensors.items():
non_tensors[key] = np.array(val, dtype=object)
return {**tensors, **non_tensors}
def process_image(image: dict, max_pixels: int = 2048 * 2048, min_pixels: int = 512 * 512):
import math
from io import BytesIO
from PIL import Image
if isinstance(image, dict):
image = Image.open(BytesIO(image['bytes']))
if (image.width * image.height) > max_pixels:
resize_factor = math.sqrt(max_pixels / (image.width * image.height))
width, height = int(image.width * resize_factor), int(image.height * resize_factor)
image = image.resize((width, height))
if (image.width * image.height) < min_pixels:
resize_factor = math.sqrt(min_pixels / (image.width * image.height))
width, height = int(image.width * resize_factor), int(image.height * resize_factor)
image = image.resize((width, height))
if image.mode != 'RGB':
image = image.convert('RGB')
return image
class RLHFDataset(Dataset):
"""
We assume the dataset contains a column that contains prompts and other information
"""
def __init__(self,
parquet_files: Union[str, List[str]],
tokenizer: PreTrainedTokenizer,
processor: Optional[ProcessorMixin] = None,
prompt_key='prompt',
image_key='images',
max_prompt_length=1024,
filter_prompts=True,
cache_dir='~/.cache/verl/rlhf',
chat_template_func=None,
return_raw_chat=False,
truncation='error',
template_type='default'):
if not isinstance(parquet_files, (List, ListConfig)):
parquet_files = [parquet_files]
self.parquet_files = copy.deepcopy(parquet_files)
self.original_parquet_files = copy.deepcopy(parquet_files) # use for resume
self.cache_dir = os.path.expanduser(cache_dir)
self.tokenizer = tokenizer
self.processor = processor
self.prompt_key = prompt_key
self.image_key = image_key
self.max_prompt_length = max_prompt_length
self.filter_prompts = filter_prompts
self.return_raw_chat = return_raw_chat
self.chat_template_func = chat_template_func
self.truncation = truncation
self.template_type = template_type
# whether to store the dataset in state_dict()
# default not store
self.serialize_dataset = False
self._download()
self._read_files_and_tokenize()
def _download(self, use_origin_parquet=False):
from verl.utils.fs import copy_to_local
parquet_files = self.parquet_files if not use_origin_parquet else self.original_parquet_files
for i, parquet_file in enumerate(parquet_files):
self.parquet_files[i] = copy_to_local(src=parquet_file, cache_dir=self.cache_dir)
def _read_files_and_tokenize(self):
dataframes = []
for parquet_file in self.parquet_files:
# read parquet files and cache
dataframe = pd.read_parquet(parquet_file)
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
print(f'original dataset len: {len(self.dataframe)}')
# filter out too long prompts
tokenizer = self.tokenizer
prompt_key = self.prompt_key
if self.template_type=="default":
self.dataframe = self.dataframe[self.dataframe.apply(lambda doc: len(
tokenizer.apply_chat_template(doc[prompt_key], add_generation_prompt=True)) <= self.max_prompt_length,
axis=1)]
elif self.template_type=="tir_base_0307":
self.dataframe = self.dataframe[self.dataframe.apply(lambda doc: len(tokenizer.encode(tir_base_0307.replace("[PROMPT]", doc[prompt_key][1]['content']))) <= self.max_prompt_length-10, axis=1)]
elif self.template_type=="tir_base_0309":
self.dataframe = self.dataframe[self.dataframe.apply(lambda doc: len(tokenizer.encode(tir_base_0309.replace("[PROMPT]", doc[prompt_key][1]['content']))) <= self.max_prompt_length-10, axis=1)]
print(f'filter dataset len: {len(self.dataframe)}')
def resume_dataset_state(self):
self.serialize_dataset = False if hasattr(self, 'original_parquet_files') else True
        # resume dataframe if it's not serialized in data.pt
if not self.serialize_dataset:
self._download(use_origin_parquet=True) # download and resume from original parquet files
self._read_files_and_tokenize()
else:
print(r'old dataloader ckpt file is used, please train from scratch for better ckpt performance')
def __len__(self):
return len(self.dataframe)
def __getitem__(self, item):
"""
Note that we also return the raw_input_ids so that it can be combined with other chat template
"""
row_dict: dict = self.dataframe.iloc[item].to_dict()
chat = row_dict.pop(self.prompt_key)
if self.template_type=="default":
prompt_with_chat_template = self.tokenizer.apply_chat_template(chat, add_generation_prompt=True, tokenize=False)
elif self.template_type=="tir_base_0307":
prompt_with_chat_template = tir_base_0307.replace("[PROMPT]", chat[1]['content'])
elif self.template_type=="tir_base_0309":
prompt_with_chat_template = tir_base_0309.replace("[PROMPT]", chat[1]['content'])
if self.image_key in row_dict: # expand image token
            raw_prompt = prompt_with_chat_template.replace('<image>', '<|vision_start|><|image_pad|><|vision_end|>')
row_dict['multi_modal_data'] = {'image': [process_image(image) for image in row_dict.pop(self.image_key)]}
image_inputs = self.processor.image_processor(row_dict['multi_modal_data']['image'], return_tensors='pt')
image_grid_thw = image_inputs['image_grid_thw']
row_dict['multi_modal_inputs'] = {key: val for key, val in image_inputs.items()}
if image_grid_thw is not None:
merge_length = self.processor.image_processor.merge_size**2
index = 0
                while '<image>' in prompt_with_chat_template:
                    prompt_with_chat_template = prompt_with_chat_template.replace(
                        '<image>',
'<|vision_start|>' + '<|placeholder|>' * (image_grid_thw[index].prod() // merge_length) +
'<|vision_end|>',
1,
)
index += 1
prompt_with_chat_template = prompt_with_chat_template.replace('<|placeholder|>',
self.processor.image_token)
else:
raw_prompt = prompt_with_chat_template
input_ids, attention_mask = verl_F.tokenize_and_postprocess_data(prompt=prompt_with_chat_template,
tokenizer=self.tokenizer,
max_length=self.max_prompt_length,
pad_token_id=self.tokenizer.pad_token_id,
left_pad=True,
truncation=self.truncation)
if self.image_key in row_dict:
from verl.models.transformers.qwen2_vl import get_rope_index
position_ids = get_rope_index(
self.processor,
input_ids=input_ids[0],
image_grid_thw=image_grid_thw,
attention_mask=attention_mask[0],
) # (3, seq_len)
else:
position_ids = compute_position_id_with_mask(attention_mask)
row_dict['input_ids'] = input_ids[0]
row_dict['attention_mask'] = attention_mask[0]
row_dict['position_ids'] = position_ids[0]
row_dict['raw_prompt_ids'] = self.tokenizer.encode(raw_prompt, add_special_tokens=False)
# encode prompts without chat template
if self.return_raw_chat:
row_dict['raw_prompt'] = chat.tolist()
# add index for each prompt
index = row_dict.get("extra_info", {}).get("index", 0)
row_dict["index"] = index
return row_dict
def __getstate__(self):
if not self.serialize_dataset:
state = self.__dict__.copy()
if 'dataframe' in state:
del state['dataframe']
return state
return self.__dict__.copy()
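Usage note (a hedged sketch; the tokenizer name is a placeholder and the parquet path is the one shipped under `data/torl_data/`): the dataset pairs with the module-level `collate_fn` to batch tensor and non-tensor fields.

```python
from torch.utils.data import DataLoader
from transformers import AutoTokenizer

from verl.utils.dataset.rl_dataset import RLHFDataset, collate_fn

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Math-1.5B")
dataset = RLHFDataset(parquet_files="data/torl_data/train.parquet",
                      tokenizer=tokenizer,
                      template_type="tir_base_0309")
loader = DataLoader(dataset, batch_size=4, collate_fn=collate_fn)
batch = next(iter(loader))  # 'input_ids', 'attention_mask', 'position_ids', 'raw_prompt_ids', ...
```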
================================================
FILE: verl/utils/dataset/rm_dataset.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import pandas as pd
import torch
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from verl.utils import hf_tokenizer
def download_files_distributed(download_fn):
import torch.distributed
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
# download files
download_fn()
torch.distributed.barrier()
else:
# download anyway
download_fn()
class RMDataset(Dataset):
def __init__(self,
parquet_files: Union[str, List[str]],
tokenizer,
prompt_key='prompt',
chosen_key='chosen',
rejected_key='rejected',
max_length=1024,
add_eos=True,
cache_dir='~/.cache/verl/rm'):
if not isinstance(parquet_files, List):
parquet_files = [parquet_files]
self.parquet_files = parquet_files
self.cache_dir = os.path.expanduser(cache_dir)
if isinstance(tokenizer, str):
tokenizer = hf_tokenizer(tokenizer)
self.tokenizer = tokenizer
self.prompt_key = prompt_key
self.chosen_key = chosen_key
self.rejected_key = rejected_key
self.add_eos = add_eos
self.max_length = max_length
self._download()
self._read_files_and_tokenize()
def _download(self):
def _download_files():
from verl.utils.fs import copy, is_non_local
os.makedirs(self.cache_dir, exist_ok=True)
assert os.path.exists(self.cache_dir)
for i, parquet_file in enumerate(self.parquet_files):
if is_non_local(parquet_file):
dst = os.path.join(self.cache_dir, os.path.basename(parquet_file))
if not os.path.exists(dst):
copy(src=parquet_file, dst=dst)
self.parquet_files[i] = dst
download_files_distributed(_download_files)
def _read_files_and_tokenize(self):
dataframes = []
for parquet_file in self.parquet_files:
# read parquet files and cache
dataframe = pd.read_parquet(parquet_file)
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
self.prompts = self.dataframe[self.prompt_key].tolist()
self.chosen_responses = self.dataframe[self.chosen_key].tolist()
self.rejected_responses = self.dataframe[self.rejected_key].tolist()
def __len__(self):
return len(self.prompts)
def _pad_to_length(self, input_ids, attention_mask):
curr_length = input_ids.shape[-1]
if curr_length < self.max_length:
input_ids = torch.cat(
(input_ids, torch.zeros(size=(self.max_length - curr_length,), dtype=input_ids.dtype)), dim=-1)
attention_mask = torch.cat(
(attention_mask, torch.zeros(size=(self.max_length - curr_length,), dtype=attention_mask.dtype)),
dim=-1)
elif curr_length > self.max_length:
input_ids = input_ids[:self.max_length]
attention_mask = attention_mask[:self.max_length]
return input_ids, attention_mask
def __getitem__(self, item):
prompt = self.prompts[item]
chosen_response = self.chosen_responses[item]
rejected_response = self.rejected_responses[item]
prompt_ids = self.tokenizer(prompt, return_tensors='pt')['input_ids'][0]
chosen_response_ids = self.tokenizer(chosen_response, return_tensors='pt')['input_ids'][0]
rejected_response_ids = self.tokenizer(rejected_response, return_tensors='pt')['input_ids'][0]
if self.add_eos:
chosen_response_ids = torch.cat((chosen_response_ids, torch.tensor([self.tokenizer.eos_token_id])), dim=-1)
rejected_response_ids = torch.cat((rejected_response_ids, torch.tensor([self.tokenizer.eos_token_id])),
dim=-1)
chosen_input_ids = torch.cat((prompt_ids, chosen_response_ids), dim=-1)
chosen_attention_mask = torch.ones_like(chosen_input_ids)
rejected_input_ids = torch.cat((prompt_ids, rejected_response_ids), dim=-1)
rejected_attention_mask = torch.ones_like(rejected_input_ids)
chosen_input_ids, chosen_attention_mask = self._pad_to_length(chosen_input_ids, chosen_attention_mask)
rejected_input_ids, rejected_attention_mask = self._pad_to_length(rejected_input_ids, rejected_attention_mask)
input_ids = torch.stack((chosen_input_ids, rejected_input_ids), dim=0)
        attention_mask = torch.stack((chosen_attention_mask, rejected_attention_mask), dim=0)
return {
'input_ids': input_ids,
'attention_mask': attention_mask,
}
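Usage note (illustrative; the parquet path and tokenizer name are placeholders, and the parquet must carry `prompt`/`chosen`/`rejected` columns): each item stacks the chosen and rejected sequences along dim 0.

```python
from verl.utils.dataset.rm_dataset import RMDataset

dataset = RMDataset(parquet_files="preference_pairs.parquet",
                    tokenizer="Qwen/Qwen2.5-Math-1.5B",
                    max_length=1024)
item = dataset[0]
print(item["input_ids"].shape)       # torch.Size([2, 1024]) -> (chosen, rejected)
print(item["attention_mask"].shape)  # torch.Size([2, 1024])
```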
================================================
FILE: verl/utils/dataset/sft_dataset.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SFT dataset
- We assume the user passes a single parquet file (or a list of them).
- We load all the data into memory.
Each parquet file contains prompt and response columns, selected via prompt_key/response_key (and optionally unwrapped via the *_dict_keys arguments).
"""
from typing import List, Union
import pandas as pd
import torch
from torch.utils.data import Dataset
from transformers import AutoTokenizer, PreTrainedTokenizer
from verl.utils.fs import copy_to_local
from verl.utils.model import compute_position_id_with_mask
from verl.utils import hf_tokenizer
class SFTDataset(Dataset):
"""
This is an in-memory SFTDataset
"""
def __init__(self,
parquet_files: Union[str, List[str]],
tokenizer,
prompt_key='prompt',
prompt_dict_keys=None,
response_key='response',
response_dict_keys=None,
max_length=1024,
truncation='error'):
assert truncation in ['error', 'left', 'right']
self.truncation = truncation
if not isinstance(parquet_files, List):
parquet_files = [parquet_files]
self.parquet_files = parquet_files
if isinstance(tokenizer, str):
tokenizer = hf_tokenizer(tokenizer)
self.tokenizer: PreTrainedTokenizer = tokenizer
self.prompt_key = prompt_key if isinstance(prompt_key, (tuple, list)) else [prompt_key]
self.response_key = response_key if isinstance(response_key, (tuple, list)) else [response_key]
self.prompt_dict_keys = [] if not prompt_dict_keys else prompt_dict_keys
self.response_dict_keys = [] if not response_dict_keys else response_dict_keys
self.max_length = max_length
self._download()
self._read_files_and_tokenize()
def _download(self):
for i, parquet_file in enumerate(self.parquet_files):
self.parquet_files[i] = copy_to_local(parquet_file, verbose=True)
def _read_files_and_tokenize(self):
def series_to_item(ls):
import pandas, numpy
while isinstance(ls, (pandas.core.series.Series, numpy.ndarray)) and len(ls) == 1:
ls = ls[0]
return ls
dataframes = []
for parquet_file in self.parquet_files:
# read parquet files and cache
dataframe = pd.read_parquet(parquet_file)
dataframes.append(dataframe)
self.dataframe = pd.concat(dataframes)
self.prompts = self.dataframe[self.prompt_key]
for key in self.prompt_dict_keys:
# type(x): pandas.core.series.Series
# type(x[0]): numpy.ndarray
# type(x[0][0]): dict
try:
self.prompts = self.prompts.apply(lambda x: series_to_item(x)[key], axis=1)
except Exception:
print(f'self.prompts={self.prompts}')
raise
self.prompts = self.prompts.tolist()
self.responses = self.dataframe[self.response_key]
for key in self.response_dict_keys:
try:
self.responses = self.responses.apply(lambda x: series_to_item(x)[key], axis=1)
except Exception:
print(f'self.responses={self.responses}')
raise
self.responses = self.responses.tolist()
def __len__(self):
return len(self.prompts)
def __getitem__(self, item):
tokenizer = self.tokenizer
prompt = self.prompts[item]
response = self.responses[item]
# apply chat template
prompt_chat = [{'role': 'user', 'content': prompt}]
# string
prompt_chat_str = tokenizer.apply_chat_template(prompt_chat, add_generation_prompt=True, tokenize=False)
response_chat_str = response + tokenizer.eos_token
# tokenize
prompt_ids_output = tokenizer(prompt_chat_str, return_tensors='pt', add_special_tokens=False)
prompt_ids = prompt_ids_output['input_ids'][0]
prompt_attention_mask = prompt_ids_output['attention_mask'][0]
response_ids_output = tokenizer(response_chat_str, return_tensors='pt', add_special_tokens=False)
response_ids = response_ids_output['input_ids'][0]
response_attention_mask = response_ids_output['attention_mask'][0]
prompt_length = prompt_ids.shape[0]
response_length = response_ids.shape[0]
input_ids = torch.cat((prompt_ids, response_ids), dim=-1)
attention_mask = torch.cat((prompt_attention_mask, response_attention_mask), dim=-1)
# padding to max length
sequence_length = input_ids.shape[0]
if sequence_length < self.max_length:
padded_input_ids = torch.ones(size=(self.max_length - sequence_length,),
dtype=input_ids.dtype) * self.tokenizer.pad_token_id
padded_attention_mask = torch.zeros(size=(self.max_length - sequence_length,), dtype=attention_mask.dtype)
input_ids = torch.cat((input_ids, padded_input_ids))
attention_mask = torch.cat((attention_mask, padded_attention_mask))
elif sequence_length > self.max_length:
if self.truncation == 'left':
# actually, left truncation may not be reasonable
input_ids = input_ids[-self.max_length:]
attention_mask = attention_mask[-self.max_length:]
elif self.truncation == 'right':
input_ids = input_ids[:self.max_length]
attention_mask = attention_mask[:self.max_length]
elif self.truncation == 'error':
raise NotImplementedError(f'{sequence_length=} is larger than {self.max_length=}')
else:
raise NotImplementedError(f'Unknown truncation method {self.truncation}')
position_ids = compute_position_id_with_mask(attention_mask)
loss_mask = attention_mask.clone()
if prompt_length > 1:
# mask out prompt for SFT.
loss_mask[:min(prompt_length, loss_mask.size(0)) - 1] = 0
# mask out the last token in response
loss_mask[min(prompt_length + response_length, loss_mask.size(0)) - 1] = 0
return {
'input_ids': input_ids,
'attention_mask': attention_mask,
'position_ids': position_ids,
'loss_mask': loss_mask
}
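Usage note (illustrative; the file and model names are placeholders, and we assume each prompt/response cell is a dict with a 'content' field so the `*_dict_keys` unwrapping applies): the returned `loss_mask` zeroes the prompt positions (shifted by one for next-token prediction) and the final response token, so only response tokens are supervised.

```python
from verl.utils.dataset.sft_dataset import SFTDataset

dataset = SFTDataset(parquet_files="sft.parquet",
                     tokenizer="Qwen/Qwen2.5-Math-1.5B",
                     prompt_key="prompt",
                     prompt_dict_keys=["content"],
                     response_key="response",
                     response_dict_keys=["content"],
                     max_length=1024,
                     truncation="right")
sample = dataset[0]
print(int(sample["loss_mask"].sum()))  # number of supervised response tokens
```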
================================================
FILE: verl/utils/debug/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .performance import log_gpu_memory_usage
================================================
FILE: verl/utils/debug/performance.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import logging
def log_gpu_memory_usage(head: str, logger: logging.Logger = None, level=logging.DEBUG, rank: int = 0):
if (not dist.is_initialized()) or (rank is None) or (dist.get_rank() == rank):
memory_allocated = torch.cuda.memory_allocated() / 1024**3
memory_reserved = torch.cuda.memory_reserved() / 1024**3
message = f'{head}, memory allocated (GB): {memory_allocated}, memory reserved (GB): {memory_reserved}'
if logger is None:
print(message)
else:
logger.log(msg=message, level=level)
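Usage note (illustrative; requires a CUDA device): with `logger=None` the helper falls back to `print`, and with the default `rank=0` only rank 0 reports when `torch.distributed` is initialized.

```python
import torch

from verl.utils.debug import log_gpu_memory_usage

x = torch.empty(1024, 1024, device="cuda")  # ~4 MB of float32
log_gpu_memory_usage("after allocating x", logger=None)
```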
================================================
FILE: verl/utils/debug/trajectory_tracker.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Trajectory tracker can be inserted into code to save the intermediate results.
The results will be dumped to HDFS for offline comparison.
Each process will have a client that first moves all the tensors to CPU
"""
from verl.utils.hdfs_io import makedirs, copy
import torch
import os
import ray
import io
import tempfile
from collections import deque
remote_copy = ray.remote(copy)
@ray.remote
def save_to_hdfs(data: io.BytesIO, name, hdfs_dir, verbose):
filename = name + '.pth'
with tempfile.TemporaryDirectory() as tmpdirname:
local_filepath = os.path.join(tmpdirname, filename)
with open(local_filepath, 'wb') as f:
f.write(data.getbuffer())
# upload to hdfs
if verbose:
print(f'Saving {local_filepath} to {hdfs_dir}')
try:
copy(local_filepath, hdfs_dir)
except Exception as e:
print(e)
@ray.remote
class TrajectoryTracker():
def __init__(self, hdfs_dir, verbose) -> None:
self.hdfs_dir = hdfs_dir
makedirs(hdfs_dir)
self.verbose = verbose
self.handle = deque()
def dump(self, data: io.BytesIO, name):
# get a temp file and write to it
self.handle.append(save_to_hdfs.remote(data, name, self.hdfs_dir, self.verbose))
def wait_for_hdfs(self):
while len(self.handle) != 0:
future = self.handle.popleft()
ray.get(future)
def dump_data(data, name):
enable = os.getenv('VERL_ENABLE_TRACKER', '0') == '1'
if not enable:
return
buffer = io.BytesIO()
torch.save(data, buffer)
tracker = get_trajectory_tracker()
ray.get(tracker.dump.remote(buffer, name))
def get_trajectory_tracker():
hdfs_dir = os.getenv('VERL_TRACKER_HDFS_DIR', default=None)
verbose = os.getenv('VERL_TRACKER_VERBOSE', default='0') == '1'
assert hdfs_dir is not None
tracker = TrajectoryTracker.options(name="global_tracker", get_if_exists=True,
lifetime="detached").remote(hdfs_dir, verbose)
return tracker
if __name__ == '__main__':
# testing
os.environ['VERL_ENABLE_TRACKER'] = '1'
os.environ['VERL_TRACKER_HDFS_DIR'] = '~/debug/test'
@ray.remote
def process(iter):
data = {'obs': torch.randn(10, 20)}
dump_data(data, f'process_{iter}_obs')
ray.init()
output_lst = []
for i in range(10):
output_lst.append(process.remote(i))
out = ray.get(output_lst)
tracker = get_trajectory_tracker()
ray.get(tracker.wait_for_hdfs.remote())
================================================
FILE: verl/utils/distributed.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for distributed training."""
import os
def initialize_global_process_group(timeout_second=36000):
import torch.distributed
from datetime import timedelta
torch.distributed.init_process_group('nccl', timeout=timedelta(seconds=timeout_second))
local_rank = int(os.environ["LOCAL_RANK"])
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
if torch.distributed.is_initialized():
torch.cuda.set_device(local_rank)
return local_rank, rank, world_size
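Usage note (illustrative): the function expects the standard launcher variables (`LOCAL_RANK`, `RANK`, `WORLD_SIZE`), so it is typically driven by `torchrun`, e.g. `torchrun --nproc_per_node=8 train.py` (the script name is a placeholder).

```python
from verl.utils.distributed import initialize_global_process_group

local_rank, rank, world_size = initialize_global_process_group()
print(f"rank {rank}/{world_size} bound to cuda:{local_rank}")
```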
================================================
FILE: verl/utils/flops_counter.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import PretrainedConfig
VALID_CONFIG_TYPE = {"llama", "qwen2", "qwen2_vl", "qwen2_5_vl"}
def get_device_flops(unit="T"):
def unit_convert(number, level):
units = ["B", "K", "M", "G", "T", "P"]
if number <= 0:
return number
ptr = 0
while ptr < len(units) and units[ptr] != level:
number /= 1000
ptr += 1
return number
device_name = torch.cuda.get_device_name()
flops = float("inf") # INF flops for unkown gpu type
if "H100" in device_name or "H800" in device_name:
flops = 989e12
elif "A100" in device_name or "A800" in device_name:
flops = 312e12
elif "L40" in device_name:
flops = 181.05e12
elif "L20" in device_name:
flops = 119.5e12
elif "H20" in device_name:
flops = 148e12
elif "910B" in device_name:
flops = 354e12
flops_unit = unit_convert(flops, unit)
return flops_unit
class FlopsCounter:
"""
Used to count mfu during training loop
Example:
flops_counter = FlopsCounter(config)
flops_achieved, flops_promised = flops_counter.estimate_flops(tokens_list, delta_time)
"""
def __init__(self, config: PretrainedConfig):
        if config.model_type not in VALID_CONFIG_TYPE:
            print(f"Only support config type of {VALID_CONFIG_TYPE}, but got {config.model_type}. "
                  f"MFU will always be zero.")
self.estimate_func = {
'qwen2': self._estimate_qwen2_flops,
'llama': self._estimate_qwen2_flops,
'qwen2_vl': self._estimate_qwen2_flops,
'qwen2_5_vl': self._estimate_qwen2_flops
}
self.config = config
def _estimate_unknown_flops(self, tokens_sum, batch_seqlens, delta_time):
return 0
def _estimate_qwen2_flops(self, tokens_sum, batch_seqlens, delta_time):
hidden_size = self.config.hidden_size
vocab_size = self.config.vocab_size
num_hidden_layers = self.config.num_hidden_layers
num_key_value_heads = self.config.num_key_value_heads
num_attention_heads = self.config.num_attention_heads
intermediate_size = self.config.intermediate_size
head_dim = hidden_size // num_attention_heads
q_size = num_attention_heads * head_dim
k_size = num_key_value_heads * head_dim
v_size = num_key_value_heads * head_dim
        # non-attn per-layer params
        # Qwen2/Llama use SwiGLU: gate, up and down linear layers in the MLP
mlp_N = hidden_size * intermediate_size * 3
attn_linear_N = hidden_size * (q_size + k_size + v_size + num_attention_heads * head_dim)
emd_and_lm_head_N = vocab_size * hidden_size * 2
        # non-attn all-layer params
dense_N = (mlp_N + attn_linear_N) * num_hidden_layers + emd_and_lm_head_N
# non-attn all_layer & all_token fwd & bwd flops
dense_N_flops = 6 * dense_N * tokens_sum
# attn all_layer & all_token fwd & bwd flops
seqlen_square_sum = 0
for seqlen in batch_seqlens:
seqlen_square_sum += seqlen * seqlen
attn_qkv_flops = 12 * seqlen_square_sum * head_dim * num_attention_heads * num_hidden_layers
# all_layer & all_token fwd & bwd flops
flops_all_token = dense_N_flops + attn_qkv_flops
flops_achieved = flops_all_token * (1.0 / delta_time) / 1e12
return flops_achieved
def estimate_flops(self, batch_seqlens, delta_time):
"""
Estimate the FLOPS based on the number of valid tokens in the current batch and the time taken.
Args:
batch_seqlens (List[int]): A list where each element represents the number of valid tokens in the current batch.
delta_time (float): The time taken to process the batch, in seconds.
Returns:
estimated_flops (float): The estimated FLOPS based on the input tokens and time.
promised_flops (float): The expected FLOPS of the current device.
"""
tokens_sum = sum(batch_seqlens)
func = self.estimate_func.get(self.config.model_type, self._estimate_unknown_flops)
estimated_flops = func(tokens_sum, batch_seqlens, delta_time)
promised_flops = get_device_flops()
return estimated_flops, promised_flops
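Usage note (a hedged sketch following the pattern named in the class docstring; the model name is a placeholder, a CUDA device is required, and MFU here is the single-GPU ratio achieved/promised):

```python
from transformers import AutoConfig

from verl.utils.flops_counter import FlopsCounter

config = AutoConfig.from_pretrained("Qwen/Qwen2.5-Math-1.5B")
counter = FlopsCounter(config)
batch_seqlens = [512, 768, 1024]  # valid tokens of each sequence in the batch
delta_time = 1.7                  # seconds spent on this fwd+bwd step
achieved, promised = counter.estimate_flops(batch_seqlens, delta_time)  # both in TFLOPs
print(f"MFU ~ {achieved / promised:.2%}")
```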
================================================
FILE: verl/utils/fs.py
================================================
#!/usr/bin/env python
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""File-system agnostic IO APIs"""
import os
import tempfile
import hashlib
try:
from hdfs_io import copy, makedirs, exists # for internal use only
except ImportError:
from .hdfs_io import copy, makedirs, exists
__all__ = ["copy", "exists", "makedirs"]
_HDFS_PREFIX = "hdfs://"
def is_non_local(path):
return path.startswith(_HDFS_PREFIX)
def md5_encode(path: str) -> str:
return hashlib.md5(path.encode()).hexdigest()
def get_local_temp_path(hdfs_path: str, cache_dir: str) -> str:
"""Return a local temp path that joins cache_dir and basename of hdfs_path
Args:
        hdfs_path: the remote (HDFS) path to localize
        cache_dir: the local directory that hosts the cached copy
    Returns:
        the local destination path inside cache_dir
"""
    # make an md5 encoding of hdfs_path to avoid directory conflicts
encoded_hdfs_path = md5_encode(hdfs_path)
temp_dir = os.path.join(cache_dir, encoded_hdfs_path)
os.makedirs(temp_dir, exist_ok=True)
dst = os.path.join(temp_dir, os.path.basename(hdfs_path))
return dst
def copy_to_local(src: str, cache_dir=None, filelock='.file.lock', verbose=False) -> str:
"""Copy src from hdfs to local if src is on hdfs or directly return src.
If cache_dir is None, we will use the default cache dir of the system. Note that this may cause conflicts if
the src name is the same between calls
Args:
        src (str): an HDFS path or a local path
Returns:
a local path of the copied file
"""
return copy_local_path_from_hdfs(src, cache_dir, filelock, verbose)
def copy_local_path_from_hdfs(src: str, cache_dir=None, filelock='.file.lock', verbose=False) -> str:
"""Deprecated. Please use copy_to_local instead."""
from filelock import FileLock
assert src[-1] != '/', f'Make sure the last char in src is not / because it will cause error. Got {src}'
if is_non_local(src):
# download from hdfs to local
if cache_dir is None:
# get a temp folder
cache_dir = tempfile.gettempdir()
os.makedirs(cache_dir, exist_ok=True)
assert os.path.exists(cache_dir)
local_path = get_local_temp_path(src, cache_dir)
# get a specific lock
filelock = md5_encode(src) + '.lock'
lock_file = os.path.join(cache_dir, filelock)
with FileLock(lock_file=lock_file):
if not os.path.exists(local_path):
if verbose:
print(f'Copy from {src} to {local_path}')
copy(src, local_path)
return local_path
else:
return src
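Usage note (illustrative): local paths pass through unchanged, while `hdfs://` paths are downloaded into a cache directory keyed by the md5 of the remote path.

```python
from verl.utils.fs import copy_to_local, is_non_local

print(is_non_local("/tmp/train.parquet"))       # False -> returned as-is
print(copy_to_local("/tmp/train.parquet"))      # /tmp/train.parquet
# copy_to_local("hdfs://bucket/train.parquet")  # would download, then return the local copy
```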
================================================
FILE: verl/utils/fsdp_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import functools
import json
import math
import itertools
import os
from contextlib import contextmanager
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp._runtime_utils import _lazy_init
from transformers.trainer_pt_utils import get_module_class_from_name
import torch
import torch.nn as nn
import torch.distributed as dist
def init_fn(x: torch.nn.Module):
if not torch.distributed.get_rank() == 0:
x = x.to_empty(device=torch.cuda.current_device(), recurse=False)
torch.cuda.empty_cache()
return x
def get_init_weight_context_manager(use_meta_tensor=True):
from accelerate import init_empty_weights
cpu_init_weights = lambda: torch.device('cpu')
if use_meta_tensor:
init_context = init_empty_weights if torch.distributed.get_rank() != 0 else cpu_init_weights
else:
init_context = cpu_init_weights
return init_context
# Copyright 2020-present the HuggingFace Inc. team.
# Adapted from https://github.com/huggingface/transformers/src/transformers/trainer.py
def get_fsdp_wrap_policy(module, config=None, is_lora=False):
"""Get FSDP wrap policy for the module.
Args:
module: The module to get wrap policy for
config: Configuration for wrap policy
is_lora: Whether to enable lambda policy for LoRA modules
"""
if config is None:
config = {}
if config.get('disable', False):
return None
default_transformer_cls_names_to_wrap = getattr(module, "_no_split_modules", None)
fsdp_transformer_layer_cls_to_wrap = config.get("transformer_layer_cls_to_wrap",
default_transformer_cls_names_to_wrap)
min_num_params = config.get('min_num_params', 0)
auto_wrap_policy = None
policies = []
from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy
# Add lambda policy for LoRA modules if is_lora is True
if is_lora:
def lambda_policy_fn(module):
if (len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and
module.weight.requires_grad):
return True
return False
lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn)
policies.append(lambda_policy)
if min_num_params > 0:
size_policy = functools.partial(size_based_auto_wrap_policy, min_num_params=min_num_params)
policies.append(size_policy)
elif fsdp_transformer_layer_cls_to_wrap is not None:
transformer_cls_to_wrap = set()
for layer_class in fsdp_transformer_layer_cls_to_wrap:
transformer_cls = get_module_class_from_name(module, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
transformer_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls=transformer_cls_to_wrap,
)
policies.append(transformer_policy)
if len(policies) > 0:
auto_wrap_policy = functools.partial(_or_policy, policies=policies)
return auto_wrap_policy
@torch.no_grad()
def offload_fsdp_model_to_cpu(model: FSDP, empty_cache: bool = True):
assert isinstance(model, FSDP)
# lazy init FSDP model
_lazy_init(model, model)
assert model._is_root, f"Only support root model offloading to CPU"
for handle in model._all_handles:
if handle._offload_params:
continue
flat_param = handle.flat_param
assert flat_param.data.data_ptr() == flat_param._local_shard.data_ptr() and \
id(flat_param.data) != id(flat_param._local_shard) and \
flat_param.data.size() == flat_param._local_shard.size()
handle.flat_param_to(torch.device("cpu"), non_blocking=True)
# the following still keeps id(._local_shard) != id(.data)
flat_param._local_shard = flat_param.data
assert id(flat_param._local_shard) != id(flat_param.data)
if empty_cache:
torch.cuda.empty_cache()
@torch.no_grad()
def load_fsdp_model_to_gpu(model: FSDP):
assert isinstance(model, FSDP)
# lazy init FSDP model
_lazy_init(model, model)
assert model._is_root, f"Only support root model loading to GPU"
device_id = torch.cuda.current_device()
for handle in model._all_handles:
if handle._offload_params:
continue
flat_param = handle.flat_param
handle.flat_param_to(torch.device(f"cuda:{device_id}"), non_blocking=True)
# the following still keeps id(._local_shard) != id(.data)
flat_param._local_shard = flat_param.data
@torch.no_grad()
def offload_fsdp_optimizer(optimizer):
if not optimizer.state:
return
for param_group in optimizer.param_groups:
for param in param_group['params']:
state = optimizer.state[param]
for key, value in state.items():
if isinstance(value, torch.Tensor):
state[key] = value.to("cpu", non_blocking=True)
@torch.no_grad()
def load_fsdp_optimizer(optimizer, device_id):
if not optimizer.state:
return
for param_group in optimizer.param_groups:
for param in param_group['params']:
state = optimizer.state[param]
for key, value in state.items():
if isinstance(value, torch.Tensor):
state[key] = value.to(device_id, non_blocking=True)
@contextmanager
def meta_device_init():
"""
Create model parameters with meta device.
    Note that buffers in the model will still be initialized on the default device (e.g., CPU),
    since buffers can be non-persistent and filled with expected values that cannot
    be captured on the meta device.
"""
device = torch.device("meta")
old_register_parameter = nn.Module.register_parameter
registered = set()
def register_empty_parameter(module, name, param):
old_register_parameter(module, name, param)
# we will skip register shared parameters as it
# is already registered previously
if param is not None and param not in registered:
param_cls = type(module._parameters[name])
kwargs = module._parameters[name].__dict__
kwargs["requires_grad"] = param.requires_grad
module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)
registered.add(module._parameters[name])
try:
nn.Module.register_parameter = register_empty_parameter
yield
finally:
registered.clear()
nn.Module.register_parameter = old_register_parameter
def parallel_load_safetensors(filepath):
"""
Parallel load safetensors from huggingface checkpoint
Huggingface checkpoint contains:
- config.json: a json file for model configuration
    - model.safetensors.index.json: a json file for safetensors (parameters & buffers) index
    - model-000x-of-00xx.safetensors: binary files for safetensors (parameters & buffers) chunks
Or (when model is small),
- model.safetensors: a binary file for all parameters and buffers
Each rank will own a part of model chunks and load them directly into GPU memory.
"""
from safetensors.torch import load_file
safetensors2param = {}
index_file = os.path.join(filepath, "model.safetensors.index.json")
if os.path.exists(index_file):
index = json.load(open(index_file, "rb"))
for param_name, filename in index["weight_map"].items():
safetensors2param.setdefault(filename, []).append(param_name)
else:
# in this case, the model is small and we can load it all at once
param_file = os.path.join(filepath, "model.safetensors")
assert os.path.exists(param_file), f"Cannot find {param_file}"
states = load_file(param_file)
for param_name in states:
safetensors2param.setdefault("model.safetensors", []).append(param_name)
del states
total_files = len(safetensors2param)
ckpt_chunks = sorted(safetensors2param.keys())
world_size = dist.get_world_size()
size = int(math.ceil(total_files / world_size))
ckpt_chunks = [ckpt_chunks[rank * size:rank * size + size] for rank in range(world_size)]
shard_states = {}
device = torch.cuda.current_device()
for rank, files in enumerate(ckpt_chunks):
if rank == dist.get_rank():
for file in files:
file = os.path.join(filepath, file)
states = load_file(file, device=device)
# print(f"rank {rank} loading {file}...")
shard_states.update(states)
else:
for file in files:
for param_name in safetensors2param[file]:
shard_states[param_name] = rank
return shard_states
def parallel_init_module_fn(module: torch.nn.Module, shard_states: Dict[str, torch.nn.Parameter]):
"""
Generate a function to initialize sub-modules in the `module` with `shard_states`
from huggingface checkpoint.
Args:
module (torch.nn.Module): the global module to be initialized
shard_states (Dict[str, torch.nn.Parameter]): the shard states from huggingface checkpoint
Returns:
init_fn (Callable): a function to initialize sub-modules in the `module` with `shard_states`
"""
state2fqn = {}
for name, state in itertools.chain(module.named_parameters(remove_duplicate=False),
module.named_buffers(remove_duplicate=False)):
state2fqn.setdefault(state, []).append(name)
    # collect states that are shared, i.e. registered under more than one name
shared = {s for s, names in state2fqn.items() if len(names) > 1}
materialized_states = {}
@torch.no_grad()
def create_and_sync_state(param_name, state, is_param):
assert param_name in shard_states, f"{param_name} not loaded"
device = torch.cuda.current_device()
if is_param:
param = torch.nn.Parameter(torch.empty_like(state.data, device=device), requires_grad=state.requires_grad)
else: # buffer
param = torch.empty_like(state.data, device=device)
loaded = shard_states[param_name]
if isinstance(loaded, (torch.nn.Parameter, torch.Tensor)):
            # NOTE: loaded.dtype can differ from param.dtype; copy_ casts as needed
param.data.copy_(loaded.data)
dist.broadcast(param.data, src=dist.get_rank())
else:
assert isinstance(loaded, int) # the rank that holds the state
dist.broadcast(param.data, src=loaded)
shard_states.pop(param_name)
del loaded
return param
def init_fn(sub_mod: torch.nn.Module, recurse: bool = True):
param_and_buffers = tuple(sub_mod.named_parameters(recurse=False)) + tuple(sub_mod.named_buffers(recurse=False))
# param_and_buffers = sorted(sub_mod.named_parameters(recurse=False), key=lambda x: x[0])
for name, state in param_and_buffers:
if not state.is_meta:
continue
is_param = name in sub_mod._parameters
fqn = state2fqn[state].pop(0)
# non-persistent buffers will not be saved in state dict, we can safely skip it
if (not is_param) and fqn not in shard_states:
if state.is_meta:
raise RuntimeError(
f"find a non-persistent buffer ({fqn}) initiated with device meta. "
"Such buffer is not saved in checkpoint and user should guarantee to init in CPU / GPU device.")
continue
            # shared states are materialized once, on first encounter, then reused
if state in shared:
if state not in materialized_states:
materialized_states[state] = create_and_sync_state(fqn, state, is_param)
else:
if fqn in shard_states:
shard_states.pop(fqn)
materialize_state = materialized_states[state]
            # non-shared states are created directly
else:
materialize_state = create_and_sync_state(fqn, state, is_param)
if is_param:
sub_mod._parameters[name] = materialize_state
else:
sub_mod._buffers[name] = materialize_state
if recurse:
for module in sub_mod.children():
init_fn(module, recurse=True)
# for debug
# if len(shard_states) == 0: print("clear")
return sub_mod
return init_fn
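# Illustrative end-to-end sketch (hypothetical glue; model_cls, config and
# ckpt_dir are placeholders, and torch.distributed must already be
# initialized): build the model on the meta device, shard-load the checkpoint
# across ranks, then let FSDP materialize parameters through the generated
# init_fn.
def _example_sharded_init(model_cls, config, ckpt_dir: str) -> FSDP:
    with meta_device_init():
        model = model_cls(config)
    shard_states = parallel_load_safetensors(ckpt_dir)
    init_fn = parallel_init_module_fn(model, shard_states)
    return FSDP(model, param_init_fn=init_fn, device_id=torch.cuda.current_device())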
================================================
FILE: verl/utils/hdfs_io.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import logging
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv('VERL_SFT_LOGGING_LEVEL', 'WARN'))
_HDFS_PREFIX = "hdfs://"
_HDFS_BIN_PATH = shutil.which('hdfs')
def exists(path: str, **kwargs) -> bool:
r"""Works like os.path.exists() but supports hdfs.
Test whether a path exists. Returns False for broken symbolic links.
Args:
path (str): path to test
Returns:
bool: True if the path exists, False otherwise
"""
if _is_non_local(path):
return _exists(path, **kwargs)
return os.path.exists(path)
def _exists(file_path: str):
""" hdfs capable to check whether a file_path is exists """
if file_path.startswith("hdfs"):
return _run_cmd(_hdfs_cmd(f"-test -e {file_path}")) == 0
return os.path.exists(file_path)
def makedirs(name, mode=0o777, exist_ok=False, **kwargs) -> None:
r"""Works like os.makedirs() but supports hdfs.
Super-mkdir; create a leaf directory and all intermediate ones. Works like
mkdir, except that any intermediate path segment (not just the rightmost)
will be created if it does not exist. If the target directory already
exists, raise an OSError if exist_ok is False. Otherwise no exception is
raised. This is recursive.
Args:
name (str): directory to create
mode (int): file mode bits
exist_ok (bool): if True, do not raise an exception if the directory already exists
kwargs: keyword arguments for hdfs
"""
if _is_non_local(name):
# TODO(haibin.lin):
# - handle OSError for hdfs(?)
# - support exist_ok for hdfs(?)
_mkdir(name, **kwargs)
else:
os.makedirs(name, mode=mode, exist_ok=exist_ok)
def _mkdir(file_path: str) -> bool:
"""hdfs mkdir"""
if file_path.startswith("hdfs"):
_run_cmd(_hdfs_cmd(f"-mkdir -p {file_path}"))
else:
os.makedirs(file_path, exist_ok=True)
return True
def copy(src: str, dst: str, **kwargs) -> bool:
r"""Works like shutil.copy() for file, and shutil.copytree for dir, and supports hdfs.
Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If source and destination are the same file, a SameFileError will be
raised.
    Args:
src (str): source file path
dst (str): destination file path
kwargs: keyword arguments for hdfs copy
Returns:
str: destination file path
"""
if _is_non_local(src) or _is_non_local(dst):
# TODO(haibin.lin):
# - handle SameFileError for hdfs files(?)
# - return file destination for hdfs files
return _copy(src, dst)
else:
if os.path.isdir(src):
return shutil.copytree(src, dst, **kwargs)
else:
return shutil.copy(src, dst, **kwargs)
def _copy(from_path: str, to_path: str, timeout: int = None) -> bool:
if to_path.startswith("hdfs"):
if from_path.startswith("hdfs"):
returncode = _run_cmd(_hdfs_cmd(f"-cp -f {from_path} {to_path}"), timeout=timeout)
else:
returncode = _run_cmd(_hdfs_cmd(f"-put -f {from_path} {to_path}"), timeout=timeout)
else:
if from_path.startswith("hdfs"):
returncode = _run_cmd(_hdfs_cmd(f"-get \
{from_path} {to_path}"), timeout=timeout)
else:
try:
shutil.copy(from_path, to_path)
returncode = 0
except shutil.SameFileError:
returncode = 0
except Exception as e:
logger.warning(f"copy {from_path} {to_path} failed: {e}")
returncode = -1
return returncode == 0
def _run_cmd(cmd: str, timeout=None):
    # TODO: the timeout argument is currently ignored by os.system
    return os.system(cmd)
def _hdfs_cmd(cmd: str) -> str:
return f"{_HDFS_BIN_PATH} dfs {cmd}"
def _is_non_local(path: str):
return path.startswith(_HDFS_PREFIX)
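# Illustrative usage (the hdfs:// URI is a placeholder): the helpers above
# dispatch to `hdfs dfs` for remote paths and to os/shutil for local ones.
def _example_upload_checkpoint(local_dir: str, hdfs_dir: str = "hdfs://namenode/ckpt"):
    if not exists(hdfs_dir):
        makedirs(hdfs_dir)
    return copy(local_dir, hdfs_dir)  # returns True on success for hdfs targets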
================================================
FILE: verl/utils/import_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities to check if packages are available.
We assume package availability won't change during runtime.
"""
from functools import cache
from typing import List
@cache
def is_megatron_core_available():
try:
from megatron.core import parallel_state as mpu
return True
except ImportError:
return False
@cache
def is_vllm_available():
try:
import vllm
return True
except ImportError:
return False
def import_external_libs(external_libs=None):
if external_libs is None:
return
    if not isinstance(external_libs, list):
external_libs = [external_libs]
import importlib
for external_lib in external_libs:
importlib.import_module(external_lib)
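# Illustrative usage: gate optional code paths on the availability checks, and
# import user-specified packages by name.
def _example_optional_imports():
    if is_vllm_available():
        import vllm  # noqa: F401
    import_external_libs(None)  # no-op when nothing is configured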
================================================
FILE: verl/utils/logger/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/utils/logger/aggregate_logger.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A Ray logger will receive logging info from different processes.
"""
import numbers
from typing import Dict
def concat_dict_to_str(data: Dict, step):
    output = [f'step:{step}']
    for k, v in data.items():
if isinstance(v, numbers.Number):
output.append(f'{k}:{v:.3f}')
output_str = ' - '.join(output)
return output_str
class LocalLogger:
def __init__(self, remote_logger=None, enable_wandb=False, print_to_console=False):
self.print_to_console = print_to_console
if print_to_console:
            print('Using LocalLogger is deprecated. The constructor API will change.')
def flush(self):
pass
def log(self, data, step):
if self.print_to_console:
print(concat_dict_to_str(data, step=step), flush=True)
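# Illustrative usage: non-numeric values are dropped by concat_dict_to_str,
# and numbers are printed with three decimal places.
def _example_local_logging():
    logger = LocalLogger(print_to_console=True)
    logger.log({"loss": 0.25, "tag": "skipped"}, step=10)  # prints "step:10 - loss:0.250"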
================================================
FILE: verl/utils/logging_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
def set_basic_config(level):
"""
    This function sets the global logging format and level. It is called when verl is imported.
"""
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s', level=level)
================================================
FILE: verl/utils/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/utils/megatron/memory.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class MemoryBuffer:
def __init__(self, numel, numel_padded, dtype):
self.numel = numel
self.numel_padded = numel_padded
self.dtype = dtype
self.data = torch.zeros(self.numel_padded,
dtype=self.dtype,
device=torch.cuda.current_device(),
requires_grad=False)
def zero(self):
"""Reset the buffer to zero."""
self.data.zero_()
def get(self, shape, start_index):
"""Return a tensor with the input `shape` as a view into the
1-D data starting at `start_index`."""
end_index = start_index + shape.numel()
assert end_index <= self.numel, \
'requested tensor is out of the buffer range.'
buffer_tensor = self.data[start_index:end_index]
buffer_tensor = buffer_tensor.view(shape)
return buffer_tensor
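# Illustrative usage (requires a CUDA device, since the buffer is allocated on
# the current GPU): carve two tensor views out of one flat buffer; writes
# through a view mutate the shared storage.
def _example_buffer_views():
    buf = MemoryBuffer(numel=8, numel_padded=8, dtype=torch.float32)
    a = buf.get(torch.Size([2, 2]), start_index=0)  # elements 0..3
    b = buf.get(torch.Size([4]), start_index=4)     # elements 4..7
    a.fill_(1.0)                                    # visible in buf.data[:4]
    return buf.data, b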
================================================
FILE: verl/utils/megatron/optimizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from packaging.version import Version
from apex.optimizers import FusedAdam as Adam
from apex.optimizers import FusedSGD as SGD
from megatron.core.optimizer import OptimizerConfig
from megatron.core.optimizer import get_megatron_optimizer as get_megatron_optimizer_native
def get_megatron_optimizer(
model,
config: OptimizerConfig,
no_weight_decay_cond=None,
scale_lr_cond=None,
lr_mult=1.0,
check_for_nan_in_loss_and_grad=False,
overlap_param_gather=False # add for verl
):
# Base optimizer.
return get_megatron_optimizer_native(config=config,
model_chunks=model,
no_weight_decay_cond=no_weight_decay_cond,
scale_lr_cond=scale_lr_cond,
lr_mult=lr_mult)
================================================
FILE: verl/utils/megatron/pipeline_parallel.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from megatron.core import parallel_state as mpu
from .sequence_parallel import pad_to_sequence_parallel
def compute_transformers_input_shapes(batches, meta_info):
    from flash_attn.bert_padding import unpad_input  # FlashAttention 2 is required for Megatron
# pre-compute input shapes for each micro-batch at each pp stage
input_shapes = []
for model_inputs in batches:
input_ids = model_inputs['input_ids']
attention_mask = model_inputs['attention_mask']
input_ids_rmpad = unpad_input(input_ids.unsqueeze(dim=-1), attention_mask)[0] # (total_nnz, 1)
if meta_info['sequence_parallel']:
input_ids_rmpad = pad_to_sequence_parallel(input_ids_rmpad)
# compute shapes for model_inputs
input_shapes.append(
torch.Size([
input_ids_rmpad.shape[0] // mpu.get_tensor_model_parallel_world_size(), 1, meta_info['hidden_size']
]))
else:
# compute shapes for model_inputs
input_shapes.append(torch.Size([input_ids_rmpad.shape[0], 1, meta_info['hidden_size']]))
return input_shapes
def make_batch_generator(batches, vpp_size):
if vpp_size > 1:
# has vpp
batch_generator = [batches] * vpp_size # number of vpp chunks
batch_generator = [iter(b) for b in batch_generator]
else:
# no vpp
batch_generator = iter(batches)
return batch_generator
================================================
FILE: verl/utils/megatron/sequence_parallel.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from megatron.core import parallel_state as mpu
def mark_parameter_as_sequence_parallel(parameter):
setattr(parameter, 'sequence_parallel', True)
def is_sequence_parallel_param(param):
return hasattr(param, 'sequence_parallel') and param.sequence_parallel
def pad_to_sequence_parallel(unpad_tokens: torch.Tensor):
"""pad the tokens such that the total length is a multiple of sp world size
Args:
unpad_tokens: (total_nnz, ...). Tokens after removing padding
Returns:
"""
total_nnz = unpad_tokens.shape[0]
sp_world_size = mpu.get_tensor_model_parallel_world_size()
if total_nnz % sp_world_size == 0:
pad_size = 0
else:
pad_size = sp_world_size - total_nnz % sp_world_size
if pad_size > 0:
if unpad_tokens.ndim == 1:
unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
elif unpad_tokens.ndim == 2:
unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
else:
            raise NotImplementedError(f'Padding dim {unpad_tokens.ndim} is not supported')
return unpad_tokens
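# Worked example of the padding arithmetic above, without touching mpu: with
# an sp world size of 4 and 10 unpadded tokens, pad_size = 4 - 10 % 4 = 2, so
# the padded length becomes 12.
def _example_pad_size(total_nnz: int = 10, sp_world_size: int = 4) -> int:
    pad_size = 0 if total_nnz % sp_world_size == 0 else sp_world_size - total_nnz % sp_world_size
    return total_nnz + pad_size  # 12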
================================================
FILE: verl/utils/megatron/tensor_parallel.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for using tensor_parallel in megatron
"""
from typing import Dict
import torch
from torch.nn import init
import torch.distributed as dist
from megatron.core import ModelParallelConfig
from megatron.core import parallel_state as mpu, tensor_parallel
import verl.utils.torch_functional as verl_F
def update_kwargs_with_config(dictionary: Dict, config: ModelParallelConfig):
dictionary['config'] = config
return dictionary
def get_default_kwargs_for_model_parallel_config():
model_parallel_config_kwargs = {
'params_dtype': torch.float32,
'use_cpu_initialization': False,
'perform_initialization': True,
'gradient_accumulation_fusion': False,
'sequence_parallel': False,
}
return model_parallel_config_kwargs
def get_default_model_parallel_config():
return ModelParallelConfig(**get_default_kwargs_for_model_parallel_config())
def get_common_default_kwargs_for_parallel_linear():
default_model_parallel_config = get_default_model_parallel_config()
common_default_kwargs = {
'init_method': init.xavier_normal_,
'stride': 1,
'keep_master_weight_for_test': False,
'config': default_model_parallel_config,
}
return common_default_kwargs
def get_default_kwargs_for_column_parallel_linear():
model_parallel_config_kwargs = get_default_kwargs_for_model_parallel_config()
column_parallel_config_kwargs = {
'async_tensor_model_parallel_allreduce': False,
}
model_parallel_config_kwargs.update(column_parallel_config_kwargs)
column_default_kwargs = {
'config': ModelParallelConfig(**model_parallel_config_kwargs),
}
common_default_kwargs = get_common_default_kwargs_for_parallel_linear()
common_default_kwargs.update(column_default_kwargs)
return common_default_kwargs
def get_default_kwargs_for_row_parallel_linear():
common_default_kwargs = get_common_default_kwargs_for_parallel_linear()
return common_default_kwargs
def get_default_kwargs_for_parallel_embedding():
model_parallel_config_kwargs = get_default_kwargs_for_model_parallel_config()
embedding_default_kwargs = {
'init_method': init.xavier_normal_,
'config': ModelParallelConfig(**model_parallel_config_kwargs),
}
return embedding_default_kwargs
def is_tensor_parallel_param(param):
return (hasattr(param, 'tensor_model_parallel') and param.tensor_model_parallel)
def get_tensor_parallel_partition_dim(param):
assert is_tensor_parallel_param(param)
return param.partition_dim
def get_tensor_parallel_partition_stride(param):
assert is_tensor_parallel_param(param)
return param.partition_stride
class _VocabParallelEntropy(torch.autograd.Function):
@staticmethod
def forward(ctx, vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
logits_max = vocab_parallel_logits.max(dim=-1, keepdim=True).values
dist.all_reduce(logits_max, op=dist.ReduceOp.MAX, group=mpu.get_tensor_model_parallel_group())
normalized_vocab_parallel_logits = vocab_parallel_logits - logits_max
normalized_exp_logits = normalized_vocab_parallel_logits.exp()
normalized_sum_exp_logits = normalized_exp_logits.sum(dim=-1, keepdim=True)
dist.all_reduce(normalized_sum_exp_logits, group=mpu.get_tensor_model_parallel_group())
softmax_logits = normalized_exp_logits / normalized_sum_exp_logits
sum_softmax_times_logits = (softmax_logits * vocab_parallel_logits).sum(dim=-1, keepdim=True)
dist.all_reduce(sum_softmax_times_logits, group=mpu.get_tensor_model_parallel_group())
entropy = logits_max + normalized_sum_exp_logits.log() - sum_softmax_times_logits
ctx.save_for_backward(vocab_parallel_logits, softmax_logits, sum_softmax_times_logits)
return entropy.squeeze(dim=-1)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
vocab_parallel_logits, softmax_logits, sum_softmax_times_logits = ctx.saved_tensors
grad_input = grad_output.unsqueeze(dim=-1) * softmax_logits * (sum_softmax_times_logits - vocab_parallel_logits)
return grad_input
def vocab_parallel_entropy(vocab_parallel_logits: torch.Tensor) -> torch.Tensor:
"""Compute entropy when the logits are sharded in tp ranks
Args:
vocab_parallel_logits: (total_nnz, vocab_size // tp_size)
Returns: (total_nnz,)
"""
return _VocabParallelEntropy.apply(vocab_parallel_logits)
def vocab_parallel_log_probs_from_logits(logits, labels):
"""TODO(zhangchi.usc1992): We may change the implementation later"""
return -tensor_parallel.vocab_parallel_cross_entropy(vocab_parallel_logits=logits, target=labels)
def vocab_parallel_log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
"""Similar to log_probs_from_logits_response_rmpad, but the logits_rmpad is now spliited across tensor parallel region.
This will further reduce the peak memory usage during training
Args:
input_ids: [batch_size, seqlen]
attention_mask: [batch_size, seqlen]
logits_rmpad: [total_nnz, vocab_size // tp_size]
response_length: int
"""
from flash_attn.bert_padding import pad_input, unpad_input
batch_size, seqlen = input_ids.shape
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
input_ids_rmpad = input_ids_rmpad.squeeze(-1)
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
full_log_probs_rmpad = vocab_parallel_log_probs_from_logits(logits=logits_rmpad,
labels=input_ids_rmpad_rolled) # (total_nnz,)
full_output = pad_input(hidden_states=full_log_probs_rmpad.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
output = full_output.squeeze(-1)[:, -response_length - 1:-1] # [batch_size, response_length]
return output
def vocab_parallel_compute_entropy_loss(logits, eos_mask):
"""Compute Categorical entropy loss
Args:
logits: `(torch.Tensor)`
shape: (bs, response_length, vocab_size)
eos_mask: `(torch.Tensor)`
shape: (bs, response_length)
Returns:
entropy: a scalar torch.Tensor
"""
# compute entropy
entropy = vocab_parallel_entropy(logits)
entropy_loss = verl_F.masked_mean(entropy, mask=eos_mask)
return entropy_loss
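# Single-rank sanity sketch of the identity used in _VocabParallelEntropy:
# H = logsumexp(z) - sum(softmax(z) * z), evaluated here on unsharded logits.
def _example_entropy_reference(logits: torch.Tensor) -> torch.Tensor:
    softmax = torch.softmax(logits, dim=-1)
    return torch.logsumexp(logits, dim=-1) - (softmax * logits).sum(dim=-1)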
================================================
FILE: verl/utils/megatron_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain utilities."""
import importlib
from packaging.version import Version
from typing import Any, Dict
import time
from omegaconf import DictConfig
from verl.utils.torch_dtypes import PrecisionType
from verl.utils.memory_buffer import build_memory_reference_from_module
import torch
import torch.nn as nn
import torch.nn.functional as F
from megatron.core import mpu, tensor_parallel
from megatron.core.utils import get_attr_wrapped_model
from megatron.core.transformer import TransformerConfig
from megatron.core.transformer.module import Float16Module
from megatron.core.distributed import DistributedDataParallelConfig
from megatron.core.distributed import DistributedDataParallel as DDP
from megatron.core.enums import ModelType
from megatron.core import ModelParallelConfig
from megatron.core.optimizer import OptimizerConfig
def get_model_config(model):
return get_attr_wrapped_model(model, 'megatron_config', allow_none=False)
def get_model(model_provider_func, model_type=ModelType.encoder_or_decoder, wrap_with_ddp=True):
"""Build the model."""
# Build model.
if mpu.get_pipeline_model_parallel_world_size() > 1 and \
mpu.get_virtual_pipeline_model_parallel_world_size() is not None:
assert model_type != ModelType.encoder_and_decoder, \
"Interleaved schedule not supported for model with both encoder and decoder"
model = []
for i in range(mpu.get_virtual_pipeline_model_parallel_world_size()):
mpu.set_virtual_pipeline_model_parallel_rank(i)
# Set pre_process and post_process only after virtual rank is set.
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
this_model = model_provider_func(pre_process=pre_process, post_process=post_process)
this_model.model_type = model_type
model.append(this_model)
else:
pre_process = mpu.is_pipeline_first_stage()
post_process = mpu.is_pipeline_last_stage()
add_encoder = True
add_decoder = True
if model_type == ModelType.encoder_and_decoder:
if mpu.get_pipeline_model_parallel_world_size() > 1:
assert mpu.get_pipeline_model_parallel_split_rank() is not None, \
"Split rank needs to be specified for model with both encoder and decoder"
rank = mpu.get_pipeline_model_parallel_rank()
split_rank = mpu.get_pipeline_model_parallel_split_rank()
world_size = mpu.get_pipeline_model_parallel_world_size()
pre_process = rank == 0 or rank == split_rank
post_process = (rank == (split_rank - 1)) or (rank == (world_size - 1))
add_encoder = mpu.is_pipeline_stage_before_split()
add_decoder = mpu.is_pipeline_stage_after_split()
model = model_provider_func(pre_process=pre_process,
post_process=post_process,
add_encoder=add_encoder,
add_decoder=add_decoder)
else:
model = model_provider_func(pre_process=pre_process, post_process=post_process)
model.model_type = model_type
if not isinstance(model, list):
model = [model]
# Set tensor model parallel attributes if not set.
# Only parameters that are already tensor model parallel have these
# attributes set for them. We should make sure the default attributes
# are set for all params so the optimizer can use them.
for model_module in model:
for param in model_module.parameters():
tensor_parallel.set_defaults_if_not_set_tensor_model_parallel_attributes(param)
# Print number of parameters.
if mpu.get_data_parallel_rank() == 0:
print(' > number of parameters on (tensor, pipeline) '
'model parallel rank ({}, {}): {}'.format(
mpu.get_tensor_model_parallel_rank(), mpu.get_pipeline_model_parallel_rank(),
sum([sum([p.nelement() for p in model_module.parameters()]) for model_module in model])),
flush=True)
# GPU allocation.
for model_module in model:
model_module.cuda(torch.cuda.current_device())
# Fp16 conversion.
config: ModelParallelConfig = get_model_config(model[0])
config.fp8 = None
tfconfig: TransformerConfig = convert_config(model[0].config, config)
if config.fp16 or config.bf16: # the ModelParallelConfig in GPTModel
model = [Float16Module(config, model_module) for model_module in model]
if wrap_with_ddp:
ddp_models = []
for model_chunk_idx, model_chunk in enumerate(model):
ddp_model = DDP(
config=tfconfig,
module=model_chunk,
disable_bucketing=(model_chunk_idx > 0),
ddp_config=DistributedDataParallelConfig(
overlap_grad_reduce=False,
use_distributed_optimizer=True,
grad_reduce_in_fp32=True, # [old] accumulate_allreduce_grads_in_fp32=True,
))
ddp_models.append(ddp_model)
model = ddp_models
    # Broadcast params from the data parallel src rank to other data parallel ranks
    # (originally gated on args.data_parallel_random_init).
for model_module in model:
model_module.broadcast_params()
return model
ALL_MODULE_WRAPPER_CLASSNAMES = (DDP, Float16Module)
def unwrap_model(model, module_instances=ALL_MODULE_WRAPPER_CLASSNAMES):
return_list = True
if not isinstance(model, list):
model = [model]
return_list = False
unwrapped_model = []
for model_module in model:
while isinstance(model_module, module_instances):
model_module = model_module.module
unwrapped_model.append(model_module)
if not return_list:
return unwrapped_model[0]
return unwrapped_model
from transformers import PretrainedConfig
def convert_config(hf_config: PretrainedConfig, megatron_config) -> TransformerConfig:
    print(f'megatron config: {megatron_config}')
    dt = PrecisionType.to_dtype(megatron_config.params_dtype)
    print(f'pipeline_dtype={dt}')
transformer_config = TransformerConfig(
num_layers=hf_config.num_hidden_layers,
hidden_size=hf_config.hidden_size,
num_attention_heads=hf_config.num_attention_heads,
num_query_groups=hf_config.num_key_value_heads,
ffn_hidden_size=hf_config.intermediate_size,
# max_position_embeddings=hf_config.max_position_embeddings,
activation_func=F.silu,
normalization='RMSNorm',
# rotary_percent=False, # default,
gated_linear_unit=True, # for llama
use_cpu_initialization=True,
        apply_residual_connection_post_layernorm=False,  # TODO: check what this means
add_bias_linear=False,
tensor_model_parallel_size=mpu.get_tensor_model_parallel_world_size(),
pipeline_model_parallel_size=mpu.get_pipeline_model_parallel_world_size(),
virtual_pipeline_model_parallel_size=mpu.get_virtual_pipeline_model_parallel_world_size(),
pipeline_dtype=dt,
params_dtype=dt,
sequence_parallel=True,
variable_seq_lengths=True,
masked_softmax_fusion=True,
moe_token_dispatcher_type="alltoall",
bf16=dt is torch.bfloat16)
if torch.distributed.get_rank() == 0:
print(f'tensor_parallel_size={transformer_config.tensor_model_parallel_size} \n \
pipeline_model_parallel_size={transformer_config.pipeline_model_parallel_size} \n \
virtual_pipeline_model_parallel_size={transformer_config.virtual_pipeline_model_parallel_size} \n \
pipeline_dtype={transformer_config.pipeline_dtype} \n \
params_dtype={transformer_config.params_dtype} \n \
sequence_parallel={transformer_config.sequence_parallel} \n \
variable_seq_lengths={transformer_config.variable_seq_lengths} \n \
masked_softmax_fusion={transformer_config.masked_softmax_fusion} \n ')
return transformer_config
def init_megatron_optim_config(optim_config: Dict) -> OptimizerConfig:
config = OptimizerConfig(
optimizer='adam',
lr=optim_config.get('lr'),
clip_grad=optim_config.get('clip_grad'),
weight_decay=1e-2,
bf16=True,
params_dtype=torch.bfloat16,
use_distributed_optimizer=True,
)
return config
def init_model_parallel_config(config: DictConfig) -> ModelParallelConfig:
# TODO(sgm): check how to disable megatron timers
timers = None
return ModelParallelConfig(tensor_model_parallel_size=config.get('tensor_model_parallel_size'),
pipeline_model_parallel_size=config.get('pipeline_model_parallel_size'),
virtual_pipeline_model_parallel_size=config.get('virtual_pipeline_model_parallel_size'),
sequence_parallel=config.get('sequence_parallel'),
params_dtype=PrecisionType.to_dtype(config.get('param_dtype')),
pipeline_dtype=PrecisionType.to_dtype(config.get('param_dtype')),
bf16=True,
fp16=False,
timers=timers)
def offload_megatron_param_and_grad(module_list: nn.ModuleList, offload_grad=False, hybrid_engine=None):
if hybrid_engine is not None:
pp_rank = mpu.get_pipeline_model_parallel_rank()
for buffer in hybrid_engine.memory_buffers[pp_rank].values():
buffer.data = buffer.data.to('cpu', non_blocking=True)
build_memory_reference_from_module(module_list, hybrid_engine.memory_buffers[pp_rank], maintain_weight=True)
else:
for module in module_list:
for _, param in module.named_parameters():
param.data = param.data.to('cpu', non_blocking=True)
if offload_grad and param.grad is not None:
param.grad = param.grad.to("cpu", non_blocking=True)
torch.cuda.empty_cache()
def load_megatron_param_and_grad(module_list: nn.ModuleList, device_id, load_grad=False, hybrid_engine=None):
if hybrid_engine is not None:
pp_rank = mpu.get_pipeline_model_parallel_rank()
for buffer in hybrid_engine.memory_buffers[pp_rank].values():
buffer.data = buffer.data.to(device_id, non_blocking=True)
build_memory_reference_from_module(module_list, hybrid_engine.memory_buffers[pp_rank], maintain_weight=True)
else:
for module in module_list:
for _, param in module.named_parameters():
param.data = param.data.to(device_id, non_blocking=True)
if load_grad and param.grad is not None:
param.grad = param.grad.to(device_id, non_blocking=True)
torch.cuda.empty_cache()
================================================
FILE: verl/utils/memory_buffer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains utilities to manipulate torch memory buffers
"""
from typing import Dict, List, Optional
import torch
from torch import nn
class MemoryBuffer:
"""
    A memory buffer is a contiguous torch tensor that can back multiple tensors
    sharing the underlying memory. All tensors in a buffer must share a single dtype.
"""
def __init__(self, numel: int, numel_padded: int, dtype: torch.dtype, source: Optional[torch.Tensor] = None):
self.numel = numel
self.numel_padded = numel_padded
self.dtype = dtype
if source is not None:
self.data = source
else:
self.data = torch.zeros(self.numel_padded, dtype=self.dtype, device='cuda', requires_grad=False)
def zero(self):
"""Reset the buffer to zero."""
self.data.zero_()
def get(self, shape, start_index):
"""Return a tensor with the input `shape` as a view into the
1-D data starting at `start_index`."""
end_index = start_index + shape.numel()
assert end_index <= self.numel, \
'requested tensor is out of the buffer range.'
buffer_tensor = self.data[start_index:end_index]
buffer_tensor = buffer_tensor.view(shape)
return buffer_tensor
def calc_padded_numel(shape: torch.Size, dtype: torch.dtype):
"""for cuda memory alignment, make sure alignment by 128-bits"""
align_numel = 128 // torch.finfo(dtype).bits
numel = shape.numel()
return (numel + align_numel - 1) // align_numel * align_numel
def get_weight_buffer_meta_from_module(module: nn.Module) -> Dict[str, Dict]:
"""
Return a dictionary containing name to a shape and dtype.
"""
weight_buffer_meta = {}
for name, param in sorted(module.named_parameters()):
weight_buffer_meta[name] = {'shape': param.shape, 'dtype': param.dtype}
return weight_buffer_meta
def build_memory_buffer(weight_buffer_meta: Dict[str, Dict]) -> Dict[torch.dtype, MemoryBuffer]:
"""Build the memory buffer given weight_buffer_meta
Args:
weight_buffer_meta: contains mapping from name to a dictionary containing shape and dtype of the tensors
Returns: a large memory buffer for each dtype that can hold all the tensors
"""
memory_buffers = {}
total_numel_map = {} # map from dtype to the total numel
for name, meta_info in sorted(weight_buffer_meta.items()):
shape = meta_info['shape']
dtype = meta_info['dtype']
assert isinstance(shape, torch.Size)
assert isinstance(dtype, torch.dtype)
if dtype not in total_numel_map:
total_numel_map[dtype] = 0
total_numel_map[dtype] += calc_padded_numel(shape, dtype)
for dtype, total_numel in total_numel_map.items():
memory_buffers[dtype] = MemoryBuffer(total_numel, total_numel, dtype)
return memory_buffers
def build_memory_reference_from_module(module: torch.nn.Module,
memory_buffers: Dict[torch.dtype, MemoryBuffer],
maintain_weight=True):
start_index = {}
for dtype in memory_buffers.keys():
start_index[dtype] = 0
for name, param in sorted(module.named_parameters()):
memory_buffer = memory_buffers[param.dtype]
buffer = memory_buffer.get(shape=param.shape, start_index=start_index[param.dtype])
# need to increment start_index
        start_index[param.dtype] += calc_padded_numel(param.shape, param.dtype)
if maintain_weight:
buffer.copy_(param.data)
param.data = buffer
def build_memory_reference(weight_buffer_meta: Dict[str, Dict], memory_buffers: Dict[torch.dtype, MemoryBuffer]):
"""Build the memory references. The memory buffers are built using the build_memory_buffer API.
This API will allocate a weight buffer pointer to the memory buffer according to the weight_buffer_meta.
Args:
weight_buffer_meta:
memory_buffers:
Returns:
"""
start_idx = {}
weight_buffers = {}
for dtype in memory_buffers.keys():
start_idx[dtype] = 0
for name, meta_info in sorted(weight_buffer_meta.items()):
shape = meta_info['shape']
dtype = meta_info['dtype']
buffer = memory_buffers[dtype].get(shape, start_index=start_idx[dtype])
start_idx[dtype] += calc_padded_numel(shape, dtype)
weight_buffers[name] = buffer
return weight_buffers
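# Worked example (requires a CUDA device): for float32, align_numel = 128 // 32
# = 4, so a (3, 5) parameter (15 elements) pads to 16 and a (4,) parameter
# stays at 4; the single fp32 buffer holds 20 elements in total.
def _example_weight_buffers():
    meta = {
        'a': {'shape': torch.Size([3, 5]), 'dtype': torch.float32},
        'b': {'shape': torch.Size([4]), 'dtype': torch.float32},
    }
    buffers = build_memory_buffer(meta)
    views = build_memory_reference(meta, buffers)
    return views['a'].shape, views['b'].shape  # torch.Size([3, 5]), torch.Size([4])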
class MemoryBufferModuleWrapper:
"""
Note that we do not design MemoryBufferModuleWrapper as an nn.Module due to
- It will change the checkpoint name
"""
def __init__(self, module: nn.Module):
super().__init__()
self.module = module
self.weight_buffer_meta = get_weight_buffer_meta_from_module(self.module)
self.memory_buffers = build_memory_buffer(self.weight_buffer_meta)
build_memory_reference_from_module(self.module, self.memory_buffers)
def get_memory_buffers(self):
return self.memory_buffers
def get_weight_buffer_meta(self):
return self.weight_buffer_meta
class MegatronMemoryBufferForRollout(object):
"""
We assume that
- inference engine has tp + dp
- actor has tp + pp + dp
- the tp between inference engine and actor should be the same
- memory_buffers: contains a list of memory_buffers, each is a dict from dtype to MemoryBuffer
- weight_buffers: contains a list of weight_buffers, each is a dict from name to param
- named_parameters: a dict from name to parameter that normalizes the names from pp and vpp. Note that
the named_parameters may not be directly compatible with inference engine. User has to take care of
this part such as the layout mismatches. (e.g. qkv transpose)
    - Note that weight_buffers, named_parameters and memory_buffers share the same underlying GPU memory.
    - When doing weight sync, the data is transferred via the memory buffers.
"""
def __init__(self, transform_memory_param_fn):
self._memory_buffers = []
self._weight_buffers = []
self._named_parameters = {}
self.transform_memory_param_fn = transform_memory_param_fn
def initialize_weight_buffer(self, weight_buffer_meta_pp: List[Dict[str, Dict]]):
"""
Initialize the weight buffer. The weight buffer is obtained according to the actor. We will construct
a large buffer for each dtype in the weight_buffer.
Args:
            weight_buffer_meta_pp: one entry per pp stage; each entry maps a parameter name to its shape/dtype meta info
Returns: None
"""
self.weight_buffer_meta_pp = weight_buffer_meta_pp
for weight_buffer_meta in self.weight_buffer_meta_pp:
memory_buffer = build_memory_buffer(weight_buffer_meta)
self._memory_buffers.append(memory_buffer)
self._weight_buffers.append(None)
def build_memory_reference(self):
for i, weight_buffer_meta in enumerate(self.weight_buffer_meta_pp):
self._weight_buffers[i] = build_memory_reference(weight_buffer_meta, self._memory_buffers[i])
self._named_parameters = self.transform_memory_param_fn(self._weight_buffers)
@property
def named_parameters(self):
return self._named_parameters
@property
def weight_buffers(self):
return self._weight_buffers
@property
def memory_buffers(self):
return self._memory_buffers
================================================
FILE: verl/utils/model.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities to create common models from huggingface
"""
import os
import warnings
from typing import Dict, Type, Optional
import numpy as np
import torch
from torch import nn
from transformers import AutoConfig, AutoModelForCausalLM, PretrainedConfig, MistralForSequenceClassification, GenerationConfig
from verl.models.registry import ModelRegistry
class LambdaLayer(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def squeeze(x):
return torch.squeeze(x, dim=-1)
def update_model_config(module_config, override_config_kwargs):
for key, val in override_config_kwargs.items():
setattr(module_config, key, val)
def get_huggingface_actor_config(model_name: str, override_config_kwargs=None, trust_remote_code=False) -> PretrainedConfig:
if override_config_kwargs is None:
override_config_kwargs = {}
assert isinstance(override_config_kwargs, Dict), \
f'override_config_kwargs must be a dict, got {type(override_config_kwargs)}'
module_config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code)
update_model_config(module_config, override_config_kwargs)
return module_config
def get_generation_config(
model: str,
trust_remote_code: bool = False,
) -> Optional[GenerationConfig]:
try:
return GenerationConfig.from_pretrained(model)
except OSError: # Not found
try:
config = get_huggingface_actor_config(
model,
trust_remote_code=trust_remote_code,
)
return GenerationConfig.from_model_config(config)
except OSError: # Not found
return None
def create_huggingface_actor(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
"""
Args:
model_name:
override_config_kwargs:
Returns:
"""
if override_config_kwargs is None:
override_config_kwargs = {}
if automodel_kwargs is None:
automodel_kwargs = {}
assert isinstance(override_config_kwargs, Dict), \
f'override_config_kwargs must be a dict, got {type(override_config_kwargs)}'
module_config = get_huggingface_actor_config(model_name,
override_config_kwargs,
trust_remote_code=automodel_kwargs.get('trust_remote_code', False))
module: nn.Module = AutoModelForCausalLM.from_config(module_config, **automodel_kwargs)
return module
def create_huggingface_critic(model_name: str, override_config_kwargs=None, automodel_kwargs=None) -> nn.Module:
"""
Args:
model_name:
override_config_kwargs:
Returns:
"""
critic_module: nn.Module = create_huggingface_actor(model_name,
override_config_kwargs=override_config_kwargs,
automodel_kwargs=automodel_kwargs)
if automodel_kwargs is None:
automodel_kwargs = {}
torch_dtype = automodel_kwargs.get('torch_dtype', torch.float32)
critic_module.lm_head = nn.Sequential(nn.Linear(critic_module.config.hidden_size, 1, dtype=torch_dtype),
LambdaLayer(fn=squeeze))
return critic_module
def get_model_size(model: nn.Module, scale='auto'):
n_params = sum(p.numel() for p in model.parameters())
if scale == 'auto':
if n_params > 1e9:
scale = 'B'
elif n_params > 1e6:
scale = 'M'
elif n_params > 1e3:
scale = 'K'
else:
scale = ''
if scale == 'B':
n_params = n_params / 1e9
elif scale == 'M':
n_params = n_params / 1e6
elif scale == 'K':
n_params = n_params / 1e3
elif scale == '':
pass
else:
        raise NotImplementedError(f'Unknown scale {scale}')
return n_params, scale
def print_model_size(model: nn.Module, name: str = None):
n_params, scale = get_model_size(model, scale='auto')
if name is None:
name = model.__class__.__name__
print(f'{name} contains {n_params:.2f}{scale} parameters')
def create_random_mask(input_ids: torch.Tensor,
max_ratio_of_valid_token: float,
max_ratio_of_left_padding: float,
min_ratio_of_valid_token: float = 0):
"""Create a random mask given input_ids. Support left padding and right padding.
Process:
- Sample valid token length
- Sample left_padding length
- Generate padding
Args:
input_ids:
shape (batch_size, seq_len)
    Returns:
        masks: (batch_size, seq_len), 1 for valid tokens and 0 for padding
"""
assert max_ratio_of_valid_token > 0 and max_ratio_of_valid_token <= 1.
assert max_ratio_of_left_padding >= 0 and max_ratio_of_left_padding < 1.
assert min_ratio_of_valid_token <= max_ratio_of_valid_token
batch_size, sequence_length = input_ids.shape
max_num_valid_tokens = int(sequence_length * max_ratio_of_valid_token)
min_num_valid_tokens = max(1, int(sequence_length * min_ratio_of_valid_token))
max_left_padding = int(sequence_length * max_ratio_of_left_padding)
assert max_num_valid_tokens + max_left_padding <= sequence_length
    assert max_num_valid_tokens > 0 and max_num_valid_tokens <= sequence_length
masks = torch.ones_like(input_ids, dtype=torch.int64)
# TODO: we can make this faster
for i in range(batch_size):
num_left_padding = np.random.randint(low=0, high=max_left_padding + 1, dtype=np.int64)
num_valid = np.random.randint(low=min_num_valid_tokens, high=max_num_valid_tokens + 1, dtype=np.int64)
for index in range(num_left_padding):
masks[i, index] = 0
for index in range(num_left_padding + num_valid, sequence_length):
masks[i, index] = 0
return masks
def compute_position_id_with_mask(mask):
return torch.clip(torch.cumsum(mask, dim=-1) - 1, min=0, max=None)
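# Worked example: a left-padded mask [0, 0, 1, 1, 1] yields position ids
# [0, 0, 0, 1, 2] (cumsum minus one, clipped at zero), so the first real token
# gets position 0.
def _example_position_ids():
    mask = torch.tensor([[0, 0, 1, 1, 1]])
    return compute_position_id_with_mask(mask)  # tensor([[0, 0, 0, 1, 2]])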
def normalize_pp_vpp_params(params, num_hidden_layers, layer_name='layers'):
"""
Normalize the pp vpp params into a complete named parameters.
This is useful when gather parameters from pp ranks and passed to a model without pp
params: List[List[Dict[str, param]]]
params contains a list of pp, with a list of vpp named_parameters in each vpp chunk.
output: Dict[str, param]
"""
def normalize_model_name(name, pp_rank, vpp_rank, pp_size, vpp_size, num_layers):
"""
Transform the model name in each model_chunk in each pp stage into the name in inference engine
"""
if vpp_size > 1:
# print(f'try to bind vpp params to inference engine...')
layers_per_pp = num_layers // pp_size
layers_per_vpp = layers_per_pp // vpp_size
pp_offset = layers_per_vpp * pp_rank
vpp_offset = (layers_per_vpp * pp_size) * vpp_rank
layer_offset = pp_offset + vpp_offset
else:
layers_per_pp = num_layers // pp_size
layer_offset = layers_per_pp * pp_rank
if layer_name in name: # belong to an intermediate layer
split_name = name.split('.')
            # find the index of the layer number that follows layer_name
            for i, part in enumerate(split_name):
                if part == layer_name:
                    break
layer_num_idx = i + 1
# check the name
assert len(split_name) >= layer_num_idx + 1, f'split_name = {split_name}'
assert split_name[layer_num_idx].isdigit(), f'split_name = {split_name}'
# increment layer_num_idx by layer_offset
split_name[layer_num_idx] = str(int(split_name[layer_num_idx]) + layer_offset)
name = '.'.join(split_name) # weight name in inference_tp_model
return name
pp_size = len(params)
normalized_name_to_param = {}
for pp_rank in range(len(params)):
vpp_size = len(params[pp_rank])
for vpp_rank in range(vpp_size):
for name, param in params[pp_rank][vpp_rank].items():
normalized_name = normalize_model_name(name, pp_rank, vpp_rank, pp_size, vpp_size, num_hidden_layers)
normalized_name_to_param[normalized_name] = param
return normalized_name_to_param
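# Worked offset example mirroring normalize_model_name: with num_layers=8,
# pp_size=2 and vpp_size=2, each vpp chunk holds 2 layers; for pp_rank=1,
# vpp_rank=1 the offset is 2*1 + (2*2)*1 = 6, so a chunk-local
# 'model.layers.0....' becomes the global 'model.layers.6....'.
def _example_layer_offset(num_layers=8, pp_size=2, vpp_size=2, pp_rank=1, vpp_rank=1):
    layers_per_vpp = num_layers // pp_size // vpp_size
    return layers_per_vpp * pp_rank + (layers_per_vpp * pp_size) * vpp_rank  # 6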
def get_parallel_model_from_config(config,
megatron_config,
pre_process=None,
post_process=None,
share_embeddings_and_output_weights=False,
value=False):
from megatron.core import ModelParallelConfig
assert isinstance(megatron_config, ModelParallelConfig)
model_class = _get_parallel_model_architecture_from_config(config, value)
model = model_class(config,
megatron_config,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=share_embeddings_and_output_weights)
return model
def _get_parallel_model_architecture_from_config(config: PretrainedConfig, value=False) -> Type[nn.Module]:
architectures = getattr(config, "architectures", [])
for arch in architectures:
model_cls = ModelRegistry.load_model_cls(arch, value)
if model_cls is not None:
return model_cls
raise ValueError(f"Model architectures {architectures} are not supported for now. "
f"Supported architectures: {ModelRegistry.get_supported_archs()}")
def load_megatron_model_weights(config,
model_config,
parallel_model,
params_dtype,
is_value_model=False,
local_cache_path='~/.cache/verl/rlhf'):
assert hasattr(model_config, "architectures"), "architectures cannot be empty when load weight!"
architectures = getattr(model_config, "architectures", [])
local_cache_path = os.path.expanduser(local_cache_path)
if config.model.path.startswith("hdfs:"):
from verl.utils.fs import copy_to_local
print(f'start download from {config.model.path}')
local_model_path = copy_to_local(src=config.model.path, cache_dir=local_cache_path)
print('finish download')
else:
print(f"load from local dir {config.model.path}")
local_model_path = config.model.path
# TODO: to find a better way to load mistral7b-rm lm_head
if 'mistral7b-rm' in config.model.path:
model = MistralForSequenceClassification.from_pretrained(local_model_path) # use score head instead of lm_head
state_dict = model.state_dict()
state_dict['lm_head.weight'] = state_dict['score.weight']
state_dict['model.embed_tokens.weight'] = state_dict[
'model.embed_tokens.weight'][:32000] # workaround, 32001 -> 32000
is_value_model = True
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
model = AutoModelForCausalLM.from_pretrained(local_model_path)
state_dict = model.state_dict()
from verl.models.weight_loader_registry import get_weight_loader
print(f'before weight loader: architectures = {architectures}...')
for arch in architectures:
print(f'call weight loader arch = {arch}, model config = {model.config}')
weight_loader = get_weight_loader(arch)
weight_loader(state_dict=state_dict,
wrapped_models=parallel_model,
config=model.config,
params_dtype=params_dtype,
is_value_model=is_value_model)
# pad input_ids_rmpad, cu_seqlens and max_seqlen_in_batch to be divisible by tp
def pad_packed_inputs(unpad_tokens: torch.Tensor, cu_seqlens, max_seqlen_in_batch, size):
"""pad the tokens such that the total length is a multiple of size.
This function is useful when applying sequence parallel and context parallel
Args:
unpad_tokens: (total_nnz, ...). Tokens after removing padding
cu_seqlens: (total_nnz + 1,)
max_seqlen_in_batch: int
    Returns:
        the padded (unpad_tokens, cu_seqlens, max_seqlen_in_batch)
"""
F = nn.functional
total_nnz = unpad_tokens.shape[0]
if total_nnz % size == 0:
pad_size = 0
else:
pad_size = size - total_nnz % size
# we assume adding a new data in the batch with seqlen pad_size
if pad_size > 0:
if unpad_tokens.ndim == 1:
unpad_tokens = F.pad(unpad_tokens, (0, pad_size))
elif unpad_tokens.ndim == 2:
unpad_tokens = F.pad(unpad_tokens, (0, 0, 0, pad_size))
else:
            raise NotImplementedError(f'Padding dim {unpad_tokens.ndim} is not supported')
cu_seqlens = F.pad(cu_seqlens, (0, 1), value=pad_size + cu_seqlens[-1])
max_seqlen_in_batch = max(max_seqlen_in_batch, pad_size)
return unpad_tokens, cu_seqlens, max_seqlen_in_batch
================================================
FILE: verl/utils/py_functional.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains small Python utility functions
"""
from typing import Dict
from types import SimpleNamespace
def union_two_dict(dict1: Dict, dict2: Dict):
"""Union two dict. Will throw an error if there is an item not the same object with the same key.
Args:
dict1:
dict2:
Returns:
"""
for key, val in dict2.items():
if key in dict1:
assert dict2[key] == dict1[key], \
            f'{key} in dict1 and dict2 map to different values'
dict1[key] = val
return dict1
def append_to_dict(data: Dict, new_data: Dict):
for key, val in new_data.items():
if key not in data:
data[key] = []
data[key].append(val)
class NestedNamespace(SimpleNamespace):
def __init__(self, dictionary, **kwargs):
super().__init__(**kwargs)
for key, value in dictionary.items():
if isinstance(value, dict):
self.__setattr__(key, NestedNamespace(value))
else:
self.__setattr__(key, value)
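# Minimal usage sketch (illustrative; not part of the library API):
if __name__ == "__main__":
    merged = union_two_dict({'a': 1}, {'b': 2})
    assert merged == {'a': 1, 'b': 2}
    metrics = {}
    append_to_dict(metrics, {'loss': 0.5})
    append_to_dict(metrics, {'loss': 0.4})
    assert metrics == {'loss': [0.5, 0.4]}  # values accumulate per key
    ns = NestedNamespace({'model': {'lr': 1e-3}})
    assert ns.model.lr == 1e-3  # nested dicts become attribute access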
================================================
FILE: verl/utils/ray_utils.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains commonly used utilities for ray
"""
import ray
import concurrent.futures
def parallel_put(data_list, max_workers=None):
def put_data(index, data):
return index, ray.put(data)
if max_workers is None:
max_workers = min(len(data_list), 16)
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
data_list_f = [executor.submit(put_data, i, data) for i, data in enumerate(data_list)]
res_lst = []
for future in concurrent.futures.as_completed(data_list_f):
res_lst.append(future.result())
# reorder based on index
output = [None for _ in range(len(data_list))]
for res in res_lst:
index, data_ref = res
output[index] = data_ref
return output
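# Example (illustrative sketch; assumes ray.init() has already been called):
#   refs = parallel_put([{"shard": i} for i in range(32)])
#   assert ray.get(refs[5]) == {"shard": 5}  # input-list order is preserved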
================================================
FILE: verl/utils/rendezvous/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/utils/rendezvous/ray_backend.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from cupy.cuda.nccl import NcclCommunicator, get_unique_id
import ray
from ray.util import list_named_actors
@ray.remote
class NCCLIDStore:
def __init__(self, nccl_id):
self._nccl_id = nccl_id
def get(self):
return self._nccl_id
def get_nccl_id_store_by_name(name):
all_actors = list_named_actors(all_namespaces=True)
matched_actors = [actor for actor in all_actors if actor.get("name", None) == name]
if len(matched_actors) == 1:
actor = matched_actors[0]
return ray.get_actor(**actor)
elif len(matched_actors) > 1:
logging.warning(f"multiple actors with same name found: {matched_actors}")
elif len(matched_actors) == 0:
logging.info(f"failed to get any actor named {name}")
return None
def create_nccl_communicator_in_ray(rank: int,
world_size: int,
group_name: str,
max_retries: int = 100,
interval_s: int = 5):
if rank == 0:
nccl_id = get_unique_id()
nccl_id_store = NCCLIDStore.options(name=group_name).remote(nccl_id)
assert ray.get(nccl_id_store.get.remote()) == nccl_id
communicator = NcclCommunicator(
ndev=world_size,
commId=nccl_id,
rank=0,
)
return communicator
else:
for i in range(max_retries):
nccl_id_store = get_nccl_id_store_by_name(group_name)
if nccl_id_store is not None:
logging.info(f"nccl_id_store {group_name} got")
nccl_id = ray.get(nccl_id_store.get.remote())
logging.info(f"nccl id for {group_name} got: {nccl_id}")
communicator = NcclCommunicator(
ndev=world_size,
commId=nccl_id,
rank=rank,
)
return communicator
logging.info(f"failed to get nccl_id for {i+1} time, sleep for {interval_s} seconds")
time.sleep(interval_s)
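# Usage sketch (illustrative): every participant calls this with the same
# group_name. Rank 0 publishes the NCCL unique id through a named ray actor;
# the other ranks poll for that actor and join the communicator:
#   comm = create_nccl_communicator_in_ray(rank=rank, world_size=4, group_name="demo_group")
# Note that non-zero ranks implicitly return None once max_retries is exhausted.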
================================================
FILE: verl/utils/reward_score/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from . import gsm8k, math, prime_math, prime_code
def _default_compute_score(data_source, solution_str, ground_truth, extra_info=None, reward_type=None):
if data_source == 'openai/gsm8k':
from . import gsm8k
res = gsm8k.compute_score(solution_str, ground_truth)
elif data_source in ['lighteval/MATH', 'DigitalLearningGmbH/MATH-lighteval']:
from . import math
res = math.compute_score(solution_str, ground_truth)
elif data_source in [
'numina_aops_forum', 'numina_synthetic_math', 'numina_amc_aime', 'numina_synthetic_amc', 'numina_cn_k12',
'numina_olympiads'
]:
from . import prime_math
res = prime_math.compute_score(solution_str, ground_truth)
elif data_source in ['codecontests', 'apps', 'codeforces', 'taco']:
from . import prime_code
res = prime_code.compute_score(solution_str, ground_truth, continuous=True)
elif data_source in ['hiyouga/geometry3k']:
from . import geo3k
res = geo3k.compute_score(solution_str, ground_truth)
else:
from . import math_verifier
res = math_verifier.compute_score(solution_str, ground_truth, reward_type)
if isinstance(res, (int, float, bool)):
return float(res)
else:
return float(res[0])
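# Example (illustrative): a GSM8K answer is extracted from the '#### ' marker
# and compared with the ground truth, yielding a binary float reward:
#   _default_compute_score('openai/gsm8k', '... the total is 72. #### 72', '72')  # -> 1.0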
================================================
FILE: verl/utils/reward_score/eval.py
================================================
import os
import json
import jsonlines
import re
import copy
PATTERNS=[
r"(?i)Answer\s*:\s*([^\n]+)",
r"\\boxed\{((?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{[^{}]*\}))*\}))*\}))*\})",
]
def extract_pattern(pred: str, pattern: str):
match = re.findall(pattern, pred)
    # extract an answer list from pred, representing all possible answers
if match:
extracted_answer = match[-1]
if pattern==r"\\boxed\{((?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{[^{}]*\}))*\}))*\}))*\})": extracted_answer=extracted_answer[:-1]
return extracted_answer.strip("*").strip().strip("*")
else:
return ""
SPLIT=[
    "####"
    "\n",  # note: implicit string concatenation makes this single entry "####\n"
    "Answer:",
]
def extract_split(pred: str, split: str):
    '''
    Return the part of pred after the last occurrence of the split token.
    '''
pred=pred.split(split)[-1]
return pred.strip("*").strip().strip("*")
def expansion(answer_list: list):
org_answer_list=copy.deepcopy(answer_list)
for answer in org_answer_list:
if "=" in answer:
answer_list.append(answer.split("=")[-1])
for choice in ["A", "B", "C", "D", "E", "F"]:
if f"({choice.upper()})" in answer.upper() or f"{choice.upper()}:" in answer.upper() or f"{choice.upper()}. " in answer.upper():
answer_list.append(f"{choice.upper()}")
break
for answer in org_answer_list:
pattern = r'^(\d+(\.\d+)?)\s+[a-zA-Z]+(?:\s+[a-zA-Z]+)*$'
if bool(re.match(pattern, answer)): answer_list.append(answer.split(" ")[0])
for answer in org_answer_list:
if "\\in" in answer:
answer_list.append(answer.split("\\in")[-1].strip())
if "\u2208" in answer:
answer_list.append(answer.split("\u2208")[-1].strip())
return answer_list
def extract(pred: str):
answer_list=[]
answer_list.append(pred.split("####")[-1].strip())
for split in SPLIT:
answer_list.append(extract_split(copy.deepcopy(pred), split=split))
for pattern in PATTERNS:
answer_list.append(extract_pattern(copy.deepcopy(pred), pattern=pattern))
answer_list=expansion(answer_list)
return answer_list
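# Example (illustrative): each splitter/pattern contributes one candidate, so
# for pred = "The sum is 12.\n#### 12" the returned list contains "12"
# (from the "####" split) alongside the cruder whole-string fallbacks.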
import re
SUBSTITUTIONS = [
("an ", ""),
# ("a ", ""),
(".$", "$"),
("\\$", ""),
(r"\ ", ""),
(" ", ""),
("mbox", "text"),
(",\\text{and}", ","),
("\\text{and}", ","),
("\\text{m}", "\\text{}"),
("\\left", ""),
("\\right", ""),
("∶", ":"),
(",", ","),
("$", ""),
("\\approx", "="),
("\\simeq", "="),
("\\sim", "="),
("^\\prime", "'"),
("^{\\prime}", "'"),
("\\dfrac", "\\frac"),
("\\tfrac", "\\frac"),
("^\\circ", ""),
("%", ""),
("\u221a", "\\sqrt"),
("\u221e", "\\infty"),
("\u222a", "\\cup"),
]
REMOVED_EXPRESSIONS = [
"square",
"ways",
"integers",
"dollars",
"mph",
"inches",
# "ft", #this is dangerous, infty, left will be damaged!
"hours",
"km",
"units",
"\\ldots",
"sue",
"points",
"feet",
"minutes",
"digits",
"cents",
"degrees",
"cm",
"gm",
"pounds",
"meters",
"meals",
"edges",
"students",
"childrentickets",
"multiples",
"\\text{s}",
"\\text{.}",
"\\text{\ns}",
"\\text{}^2",
"\\text{}^3",
"\\text{\n}",
"\\text{}",
r"\mathrm{th}",
r"^\circ",
r"^{\circ}",
r"\;",
r",\!",
"{,}",
'"',
"\\dots",
]
def normalize_final_answer(final_answer: str) -> str:
"""
Normalize a final answer to a quantitative reasoning question.
Copied character for character from appendix D of Lewkowycz et al. (2022)
"""
# final_answer = final_answer.split("=")[-1]
final_answer=final_answer.strip()
if final_answer[:2]=="\\(" or final_answer[:2]=='\\[':
final_answer=final_answer[2:]
if final_answer[-2:]=='\\)' or final_answer[-2:]=='\\]':
final_answer=final_answer[:-2]
for before, after in SUBSTITUTIONS:
final_answer = final_answer.replace(before, after)
for expr in REMOVED_EXPRESSIONS:
final_answer = final_answer.replace(expr, "")
# Extract answer that is in LaTeX math, is bold,
# is surrounded by a box, etc.
final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer)
final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer)
final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer)
final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer)
final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer)
# Normalize shorthand TeX:
# \fracab -> \frac{a}{b}
# \frac{abc}{bef} -> \frac{abc}{bef}
# \fracabc -> \frac{a}{b}c
# \sqrta -> \sqrt{a}
# \sqrtab -> sqrt{a}b
final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer)
final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer)
final_answer = final_answer.replace("$", "")
# Normalize 100,000 -> 100000
if final_answer.replace(",", "").isdigit():
final_answer = final_answer.replace(",", "")
if final_answer[:2]=="\\(" or final_answer[:2]=='\\[':
final_answer=final_answer[2:]
if final_answer[-2:]=='\\)' or final_answer[-2:]=='\\]':
final_answer=final_answer[:-2]
return final_answer.strip()
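# Examples (illustrative):
#   normalize_final_answer("$\\dfrac{1}{2}$")  -> "\\frac{1}{2}"   # dollar signs dropped, \dfrac -> \frac
#   normalize_final_answer("100,000")          -> "100000"         # thousands separators removed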
"""
This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from:
- https://github.com/microsoft/ProphetNet/tree/master/CRITIC
- https://github.com/openai/prm800k
- https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py
- https://github.com/deepseek-ai/DeepSeek-Math/blob/main/evaluation/eval/eval_utils.py
"""
import re
import regex
import multiprocessing
from math import isclose
from typing import Union
from collections import defaultdict
from sympy import simplify, N
from sympy.parsing.sympy_parser import parse_expr
from sympy.parsing.latex import parse_latex
# from latex2sympy2 import latex2sympy
# from .parser import choice_answer_clean, strip_string
# from parser import choice_answer_clean
def choice_answer_clean(pred: str):
pred = pred.strip("\n").rstrip(".").rstrip("/").strip(" ").lstrip(":")
# Clean the answer based on the dataset
tmp = re.findall(r"\b(A|B|C|D|E)\b", pred.upper())
if tmp:
pred = tmp
else:
pred = [pred.strip().strip(".")]
pred = pred[-1]
# Remove the period at the end, again!
pred = pred.rstrip(".").rstrip("/")
return pred
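# Example (illustrative):
#   choice_answer_clean("The answer is (B).")  -> "B"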
def parse_digits(num):
num = regex.sub(",", "", str(num))
try:
return float(num)
except:
if num.endswith("%"):
num = num[:-1]
if num.endswith("\\"):
num = num[:-1]
try:
return float(num) / 100
except:
pass
return None
def is_digit(num):
# paired with parse_digits
return parse_digits(num) is not None
def str_to_pmatrix(input_str):
input_str = input_str.strip()
matrix_str = re.findall(r"\{.*,.*\}", input_str)
pmatrix_list = []
for m in matrix_str:
m = m.strip("{}")
pmatrix = r"\begin{pmatrix}" + m.replace(",", "\\") + r"\end{pmatrix}"
pmatrix_list.append(pmatrix)
return ", ".join(pmatrix_list)
def math_equal(
prediction: Union[bool, float, str],
reference: Union[float, str],
include_percentage: bool = True,
is_close: bool = True,
timeout: bool = False,
) -> bool:
"""
Exact match of math if and only if:
1. numerical equal: both can convert to float and are equal
2. symbolic equal: both can convert to sympy expression and are equal
"""
# print("Judge:", prediction, reference)
if prediction is None or reference is None:
return False
if str(prediction.strip().lower()) == str(reference.strip().lower()):
return True
if (
reference in ["A", "B", "C", "D", "E"]
and choice_answer_clean(prediction) == reference
):
return True
try: # 1. numerical equal
if is_digit(prediction) and is_digit(reference):
prediction = parse_digits(prediction)
reference = parse_digits(reference)
# number questions
if include_percentage:
gt_result = [reference / 100, reference, reference * 100]
else:
gt_result = [reference]
for item in gt_result:
try:
if is_close:
if numeric_equal(prediction, item):
return True
else:
if item == prediction:
return True
except Exception:
continue
return False
except:
pass
if not prediction and prediction not in [0, False]:
return False
# 2. symbolic equal
reference = str(reference).strip()
prediction = str(prediction).strip()
## pmatrix (amps)
if "pmatrix" in prediction and not "pmatrix" in reference:
reference = str_to_pmatrix(reference)
## deal with [], (), {}
pred_str, ref_str = prediction, reference
if (
prediction.startswith("[")
and prediction.endswith("]")
and not reference.startswith("(")
) or (
prediction.startswith("(")
and prediction.endswith(")")
and not reference.startswith("[")
):
pred_str = pred_str.strip("[]()")
ref_str = ref_str.strip("[]()")
for s in ["{", "}", "(", ")"]:
ref_str = ref_str.replace(s, "")
pred_str = pred_str.replace(s, "")
if pred_str.lower() == ref_str.lower():
return True
## [a, b] vs. [c, d], return a==c and b==d
if (
regex.match(r"(\(|\[).+(\)|\])", prediction) is not None
and regex.match(r"(\(|\[).+(\)|\])", reference) is not None
):
pred_parts = prediction[1:-1].split(",")
ref_parts = reference[1:-1].split(",")
if len(pred_parts) == len(ref_parts):
if all(
[
math_equal(
pred_parts[i], ref_parts[i], include_percentage, is_close
)
for i in range(len(pred_parts))
]
):
return True
if (
(
prediction.startswith("\\begin{pmatrix}")
or prediction.startswith("\\begin{bmatrix}")
)
and (
prediction.endswith("\\end{pmatrix}")
or prediction.endswith("\\end{bmatrix}")
)
and (
reference.startswith("\\begin{pmatrix}")
or reference.startswith("\\begin{bmatrix}")
)
and (
reference.endswith("\\end{pmatrix}") or reference.endswith("\\end{bmatrix}")
)
):
pred_lines = [
line.strip()
for line in prediction[
len("\\begin{pmatrix}") : -len("\\end{pmatrix}")
].split("\\\\")
if line.strip()
]
ref_lines = [
line.strip()
for line in reference[
len("\\begin{pmatrix}") : -len("\\end{pmatrix}")
].split("\\\\")
if line.strip()
]
matched = True
if len(pred_lines) == len(ref_lines):
for pred_line, ref_line in zip(pred_lines, ref_lines):
pred_parts = pred_line.split("&")
ref_parts = ref_line.split("&")
if len(pred_parts) == len(ref_parts):
if not all(
[
math_equal(
pred_parts[i],
ref_parts[i],
include_percentage,
is_close,
)
for i in range(len(pred_parts))
]
):
matched = False
break
else:
matched = False
if not matched:
break
else:
matched = False
if matched:
return True
if prediction.count("=") == 1 and reference.count("=") == 1:
pred = prediction.split("=")
pred = f"{pred[0].strip()} - ({pred[1].strip()})"
ref = reference.split("=")
ref = f"{ref[0].strip()} - ({ref[1].strip()})"
if symbolic_equal(pred, ref) or symbolic_equal(f"-({pred})", ref):
return True
elif (
prediction.count("=") == 1
and len(prediction.split("=")[0].strip()) <= 2
and "=" not in reference
):
if math_equal(
prediction.split("=")[1], reference, include_percentage, is_close
):
return True
elif (
reference.count("=") == 1
and len(reference.split("=")[0].strip()) <= 2
and "=" not in prediction
):
if math_equal(
prediction, reference.split("=")[1], include_percentage, is_close
):
return True
# symbolic equal with sympy
if timeout:
if call_with_timeout(symbolic_equal_process, prediction, reference):
return True
else:
if symbolic_equal(prediction, reference):
return True
# symbolic == numeric
try:
prediction=float(N(parse_latex(prediction)))
        if abs(prediction-float(reference))<=1e-8: return True
except:
pass
try:
reference=float(N(parse_latex(reference)))
if abs(prediction-reference)<=1e-8: return True
except:
pass
return False
def math_equal_process(param):
return math_equal(param[-2], param[-1])
def numeric_equal(prediction: float, reference: float):
# prediction = round(prediction, len(str(reference).split(".")[-1]))
return isclose(reference, prediction, rel_tol=1e-4)
def symbolic_equal(a, b):
def _parse(s):
for f in [parse_latex, parse_expr]:
try:
return f(s.replace("\\\\", "\\"))
except:
try:
return f(s)
except:
pass
return s
a = _parse(a)
b = _parse(b)
# direct equal
try:
if str(a) == str(b) or a == b:
return True
except:
pass
# simplify equal
try:
if a.equals(b) or simplify(a - b) == 0:
return True
except:
pass
# equation equal
try:
if (abs(a.lhs - a.rhs)).equals(abs(b.lhs - b.rhs)):
return True
except:
pass
try:
if numeric_equal(float(N(a)), float(N(b))):
return True
except:
pass
# matrix
try:
# if a and b are matrix
if a.shape == b.shape:
_a = a.applyfunc(lambda x: round(x, 3))
_b = b.applyfunc(lambda x: round(x, 3))
if _a.equals(_b):
return True
except:
pass
return False
def symbolic_equal_process(a, b, output_queue):
result = symbolic_equal(a, b)
output_queue.put(result)
def call_with_timeout(func, *args, timeout=1, **kwargs):
output_queue = multiprocessing.Queue()
process_args = args + (output_queue,)
process = multiprocessing.Process(target=func, args=process_args, kwargs=kwargs)
process.start()
process.join(timeout)
if process.is_alive():
process.terminate()
process.join()
return False
return output_queue.get()
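# Example (illustrative): guard the sympy comparison with a short timeout so a
# pathological expression cannot stall the reward computation:
#   call_with_timeout(symbolic_equal_process, "x + 1", "1 + x")  # -> True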
def process_answer_list(answer_list):
answer_list = list(set(answer_list))
if "" in answer_list: answer_list.remove("")
return answer_list
import os
import json
import jsonlines
import copy
from tqdm import tqdm
import pandas as pd
from multiprocessing import Pool
from functools import partial
from datetime import datetime
# api
def is_equal(pred, gt):
pred=normalize_final_answer(pred)
gt=normalize_final_answer(gt)
return math_equal(pred, gt)
def exact_match_eval(pred, gt):
gt=normalize_final_answer(gt)
answer_list=extract(pred)
normalized_answer_list=[]
for answer in copy.deepcopy(answer_list):
normalized_answer_list.append(normalize_final_answer(answer))
normalized_answer_list=process_answer_list(normalized_answer_list)
for answer in normalized_answer_list:
if math_equal(gt, answer):
return normalized_answer_list, True
return normalized_answer_list, False
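# Smoke test (illustrative sketch; not part of the original evaluation pipeline):
if __name__ == "__main__":
    print(is_equal("\\frac{1}{2}", "0.5"))  # True: symbolic and numeric forms match
    print(exact_match_eval("The total is \\boxed{72}.", "72")[1])  # True: "72" is among the extracted candidates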
================================================
FILE: verl/utils/reward_score/geo3k.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from mathruler.grader import extract_boxed_content, grade_answer
def format_reward(predict_str: str) -> float:
    pattern = re.compile(r'<think>.*</think>.*\\boxed\{.*\}.*', re.DOTALL)
match_result = re.fullmatch(pattern, predict_str)
return 1.0 if match_result else 0.0
def acc_reward(predict_str: str, ground_truth: str) -> float:
answer = extract_boxed_content(predict_str)
return 1.0 if grade_answer(answer, ground_truth) else 0.0
def compute_score(predict_str: str, ground_truth: str) -> float:
return 0.9 * acc_reward(predict_str, ground_truth) + 0.1 * format_reward(predict_str)
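# Example (illustrative): the final score blends correctness and formatting,
# so a correctly boxed right answer earns 0.9 * 1.0 + 0.1 * 1.0 = 1.0, while a
# boxed but wrong answer still earns the 0.1 format reward.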
================================================
FILE: verl/utils/reward_score/gsm8k.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def extract_solution(solution_str, method='strict'):
assert method in ['strict', 'flexible']
if method == 'strict':
# this also tests the formatting of the model
solution = re.search("#### (\\-?[0-9\\.\\,]+)", solution_str)
if solution is None:
final_answer = None
else:
final_answer = solution.group(0)
final_answer = final_answer.split('#### ')[1].replace(',', '').replace('$', '')
elif method == 'flexible':
answer = re.findall("(\\-?[0-9\\.\\,]+)", solution_str)
final_answer = None
if len(answer) == 0:
            # no reward if there is no answer
pass
else:
invalid_str = ['', '.']
# find the last number that is not '.'
for final_answer in reversed(answer):
if final_answer not in invalid_str:
break
return final_answer
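# Examples (illustrative):
#   extract_solution("... so she has 1,234 left. #### 1,234")        -> "1234"  (strict: requires the '#### ' marker)
#   extract_solution("The answers are 3 and 7", method='flexible')   -> "7"     (flexible: last number wins)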
def compute_score(solution_str, ground_truth, method='strict', format_score=0., score=1.):
"""The scoring function for GSM8k.
Reference: Trung, Luong, et al. "Reft: Reasoning with reinforced fine-tuning." Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). 2024.
Args:
solution_str: the solution text
ground_truth: the ground truth
method: the method to extract the solution, choices are 'strict' and 'flexible'
format_score: the score for the format
score: the score for the correct answer
"""
answer = extract_solution(solution_str=solution_str, method=method)
if answer is None:
return 0
else:
if answer == ground_truth:
return score
else:
return format_score
================================================
FILE: verl/utils/reward_score/math.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py
def compute_score(solution_str, ground_truth) -> float:
retval = 0.
try:
string_in_last_boxed = last_boxed_only_string(solution_str)
if string_in_last_boxed is not None:
answer = remove_boxed(string_in_last_boxed)
if is_equiv(answer, ground_truth):
retval = 1.
except Exception as e:
print(e)
return retval
# string normalization from https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/hendrycks_math.py
def is_equiv(str1, str2, verbose=False):
if str1 is None and str2 is None:
print("WARNING: Both None")
return True
if str1 is None or str2 is None:
return False
try:
ss1 = strip_string(str1)
ss2 = strip_string(str2)
if verbose:
print(ss1, ss2)
return ss1 == ss2
except Exception:
return str1 == str2
def remove_boxed(s):
if "\\boxed " in s:
left = "\\boxed "
assert s[:len(left)] == left
return s[len(left):]
left = "\\boxed{"
assert s[:len(left)] == left
assert s[-1] == "}"
return s[len(left):-1]
def last_boxed_only_string(string):
idx = string.rfind("\\boxed")
if "\\boxed " in string:
return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0]
if idx < 0:
idx = string.rfind("\\fbox")
if idx < 0:
return None
i = idx
right_brace_idx = None
num_left_braces_open = 0
while i < len(string):
if string[i] == "{":
num_left_braces_open += 1
if string[i] == "}":
num_left_braces_open -= 1
if num_left_braces_open == 0:
right_brace_idx = i
break
i += 1
if right_brace_idx is None:
retval = None
else:
retval = string[idx:right_brace_idx + 1]
return retval
def fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except AssertionError:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except AssertionError:
return string
def remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def strip_string(string):
# linebreaks
string = string.replace("\n", "")
# remove inverse spaces
string = string.replace("\\!", "")
# replace \\ with \
string = string.replace("\\\\", "\\")
# replace tfrac and dfrac with frac
string = string.replace("tfrac", "frac")
string = string.replace("dfrac", "frac")
# remove \left and \right
string = string.replace("\\left", "")
string = string.replace("\\right", "")
# Remove circ (degrees)
string = string.replace("^{\\circ}", "")
string = string.replace("^\\circ", "")
# remove dollar signs
string = string.replace("\\$", "")
# remove units (on the right)
string = remove_right_units(string)
# remove percentage
string = string.replace("\\%", "")
string = string.replace("\%", "") # noqa: W605
# " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
string = string.replace(" .", " 0.")
string = string.replace("{.", "{0.")
# if empty, return empty string
if len(string) == 0:
return string
if string[0] == ".":
string = "0" + string
# to consider: get rid of e.g. "k = " or "q = " at beginning
if len(string.split("=")) == 2:
if len(string.split("=")[0]) <= 2:
string = string.split("=")[1]
# fix sqrt3 --> sqrt{3}
string = fix_sqrt(string)
# remove spaces
string = string.replace(" ", "")
# \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
string = fix_fracs(string)
# manually change 0.5 --> \frac{1}{2}
if string == "0.5":
string = "\\frac{1}{2}"
# NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
string = fix_a_slash_b(string)
return string
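# Examples (illustrative):
#   strip_string("\\frac12")  -> "\\frac{1}{2}"   # shorthand fraction expanded
#   strip_string("0.5")       -> "\\frac{1}{2}"   # the single hard-coded decimal case
#   strip_string("1/2")       -> "\\frac{1}{2}"   # a/b rewritten via fix_a_slash_b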
================================================
FILE: verl/utils/reward_score/math_verifier.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/EleutherAI/lm-evaluation-harness/blob/main/lm_eval/tasks/hendrycks_math/utils.py
import signal
import re
from contextlib import contextmanager
import importlib.util
from verl.utils.reward_score.eval import normalize_final_answer
from math_verify import parse, verify
class TimeoutException(Exception):
pass
@contextmanager
def timeout(seconds):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
timeout_seconds=2
chinese_pattern = re.compile(r'[\u4e00-\u9fff]')
english_pattern = re.compile(r'[a-zA-Z]')
boxed_pattern = re.compile(r"\\boxed\{((?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{(?:[^{}]|\\{|\\}|(?:\{[^{}]*\}))*\}))*\}))*\})")
valid_char_pattern = re.compile(r'[a-zA-Z0-9\s\.,!?"\'\(\)\{\}\[\]_\-+=<>/@#$%^&*\\|:;~`\u2200-\u22FF]')
repeat_pattern = re.compile(r'(.{5,}?)\1{4,}')
def check_mixed_languages(text):
chinese_chars = len(chinese_pattern.findall(text))
english_chars = len(english_pattern.findall(text))
return chinese_chars >= 20 and english_chars >= 20
def undesired_format(text):
    # a response that never emits <|endoftext|> was truncated, which we treat as undesired
    return "<|endoftext|>" not in text
def check_garbled_characters(text):
    # stripping all valid characters leaves only the invalid ones
    invalid_chars = valid_char_pattern.sub('', text)
    if not text:
        return False
    invalid_ratio = len(invalid_chars) / len(text)
    return invalid_ratio > 0.3
def has_repeated_patterns(text):
return bool(repeat_pattern.search(text))
def correctness_score_default(response, gt):
matches = boxed_pattern.findall(response)
if not matches: return -1.0
    pred = matches[-1][:-1]  # the capture group includes the closing brace; strip it
return 1.0 if is_equiv(pred, gt) else -1.0
def correctness_score_v2(response, gt):
matches = boxed_pattern.findall(response)
if not matches: return -1.0
    pred = matches[-1][:-1]  # the capture group includes the closing brace; strip it
return 1.0 if is_equiv(pred, gt) else -0.5
def compute_score(solution_str, ground_truth, reward_type) -> float:
if reward_type=='default':
try:
# if undesired_format(solution_str): return -1.0
return correctness_score_default(solution_str, ground_truth)
except TimeoutException:
return -1.0
except Exception as e:
return -1.0
elif reward_type=="v2.wformat":
try:
return correctness_score_v2(solution_str, ground_truth)
except TimeoutException:
return -1.0
except Exception as e:
return -1.0
else:
try:
# if undesired_format(solution_str): return -1.0
return correctness_score_default(solution_str, ground_truth)
except TimeoutException:
return -1.0
except Exception as e:
return -1.0
# string normalization from https://github.com/EleutherAI/lm-evaluation-harness/blob/master/lm_eval/tasks/hendrycks_math.py
def is_equiv(str1, str2, verbose=False):
if str1 is None and str2 is None:
print("WARNING: Both None")
return True
if str1 is None or str2 is None:
return False
if str1.strip().lower() == str2.strip().lower(): return True
try:
str1=normalize_final_answer(str1)
str2=normalize_final_answer(str2)
str1=parse(str1)
str2=parse(str2)
return verify(str1, str2)
except:
pass
try:
ss1 = strip_string(str1)
ss2 = strip_string(str2)
if verbose:
print(ss1, ss2)
return ss1==ss2
except Exception:
return str1 == str2
def remove_boxed(s):
if "\\boxed " in s:
left = "\\boxed "
assert s[:len(left)] == left
return s[len(left):]
left = "\\boxed{"
assert s[:len(left)] == left
assert s[-1] == "}"
return s[len(left):-1]
def last_boxed_only_string(string):
idx = string.rfind("\\boxed")
if "\\boxed " in string:
return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0]
if idx < 0:
idx = string.rfind("\\fbox")
if idx < 0:
return None
i = idx
right_brace_idx = None
num_left_braces_open = 0
while i < len(string):
if string[i] == "{":
num_left_braces_open += 1
if string[i] == "}":
num_left_braces_open -= 1
if num_left_braces_open == 0:
right_brace_idx = i
break
i += 1
if right_brace_idx is None:
retval = None
else:
retval = string[idx:right_brace_idx + 1]
return retval
def fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except AssertionError:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except AssertionError:
return string
def remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def strip_string(string):
# linebreaks
string = string.replace("\n", "")
# remove inverse spaces
string = string.replace("\\!", "")
# replace \\ with \
string = string.replace("\\\\", "\\")
# replace tfrac and dfrac with frac
string = string.replace("tfrac", "frac")
string = string.replace("dfrac", "frac")
# remove \left and \right
string = string.replace("\\left", "")
string = string.replace("\\right", "")
# Remove circ (degrees)
string = string.replace("^{\\circ}", "")
string = string.replace("^\\circ", "")
# remove dollar signs
string = string.replace("\\$", "")
# remove units (on the right)
string = remove_right_units(string)
# remove percentage
string = string.replace("\\%", "")
string = string.replace("\%", "") # noqa: W605
# " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
string = string.replace(" .", " 0.")
string = string.replace("{.", "{0.")
# if empty, return empty string
if len(string) == 0:
return string
if string[0] == ".":
string = "0" + string
# to consider: get rid of e.g. "k = " or "q = " at beginning
if len(string.split("=")) == 2:
if len(string.split("=")[0]) <= 2:
string = string.split("=")[1]
# fix sqrt3 --> sqrt{3}
string = fix_sqrt(string)
# remove spaces
string = string.replace(" ", "")
# \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
string = fix_fracs(string)
# manually change 0.5 --> \frac{1}{2}
if string == "0.5":
string = "\\frac{1}{2}"
# NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
string = fix_a_slash_b(string)
return string
if __name__ == "__main__":
response="To determine which digit appears in the 534th place after the decimal point in the decimal representation of $\\frac{5}{13}$, we need to first find the repeating decimal sequence of $\\frac{5}{13}$. \n\nLet's start by calculating the decimal representation of $\\frac{5}{13}$.\n```python\nfrom decimal import Decimal, getcontext\r\n\r\n# Set the precision high enough to see the repeating pattern clearly\r\ngetcontext().prec = 1000\r\n\r\n# Calculate the decimal representation of 5/13\r\ndecimal_rep = Decimal(5) / Decimal(13)\r\nprint(str(decimal_rep))\n```\n```output\n0.3846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846153846\n```\nThe decimal representation of $\\frac{5}{13}$ is $0.\\overline{384615}$. This means the repeating sequence is \"384615\" and it has a length of 6 digits.\n\nTo find the digit in the 534th place after the decimal point, we need to determine the position within the repeating sequence. Since the sequence repeats every 6 digits, we can find the position by calculating the remainder when 534 is divided by 6.\n\nLet's calculate this.\n```python\n# Length of the repeating sequence\r\nrepeating_sequence = \"384615\"\r\nsequence_length = len(repeating_sequence)\r\n\r\n# Find the position within the repeating sequence\r\nposition = (534 - 1) % sequence_length # -1 because indexing starts from 0\r\n\r\n# Get the digit at that position\r\ndigit_in_534th_place = repeating_sequence[position]\r\nprint(digit_in_534th_place)\n```\n```output\n6\n```\nThe digit in the 534th place after the decimal point in the decimal representation of $\\frac{5}{13}$ is $\\boxed{6}$. <|endoftext|>"
answer="6"
res=compute_score(response, answer)
print(res)
================================================
FILE: verl/utils/reward_score/prime_code/__init__.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utils import check_correctness as apps_check_correctness
import json
import re
import traceback
def compute_score(completion, test_cases, continuous=False):
# try to get code solution from completion. if the completion is pure code, this will not take effect.
solution = completion.split('```python')[-1].split('```')[0]
try:
try:
if not isinstance(test_cases, dict):
test_cases = json.loads(test_cases)
except Exception as e:
print(f"Error:{e}")
# Complete check on all in-out pairs first. If there is no failure, per-sample test can be skipped.
try:
res, metadata = apps_check_correctness(in_outs=test_cases, generation=solution, timeout=5, debug=False)
metadata = dict(enumerate(metadata))[0]
success = all(map(lambda x: x == True, res))
if success:
return success, metadata
except Exception as e:
pass
test_cases_list = []
inputs = test_cases["inputs"]
outputs = test_cases["outputs"]
for i in range(len(inputs)):
test_cases_list.append({"inputs": [inputs[i]], "outputs": [outputs[i]]})
if continuous:
            # per-sample test: if a continuous score is needed, test the first 10 samples regardless of failures
            # do not test all samples because some problems have enormous test case sets
metadata_list = []
res_list = []
for test_case_id, test_case in enumerate(test_cases_list):
res, metadata = apps_check_correctness(in_outs=test_case, generation=solution, timeout=5, debug=False)
try:
metadata = dict(enumerate(metadata))[0] # metadata can be empty occasionally
except Exception as e:
metadata = {}
metadata["test_case"] = {}
metadata["test_case"]["input"] = str(test_case["inputs"][0])
metadata["test_case"]["output"] = str(test_case["outputs"][0])
metadata["test_case"]["res"] = str(res)
metadata_list.append(metadata)
res_list.extend(res)
if test_case_id >= 9:
break
res_count = len(res_list) if len(res_list) > 0 else 1
success = sum(map(lambda x: x == True, res_list)) / res_count
except Exception as e:
traceback.print_exc(10)
success = False
metadata_list = None
return success, metadata_list
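# Example (illustrative): `test_cases` is a dict (or JSON string) with parallel
# input/output lists, and `completion` is a model response whose code sits in a
# ```python fenced block:
#   compute_score("```python\nprint(input())\n```", {"inputs": ["hi\n"], "outputs": ["hi\n"]})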
================================================
FILE: verl/utils/reward_score/prime_code/testing_util.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import sys
import faulthandler
import platform
# used for debugging to time steps
from datetime import datetime
# to run the solution files we're using a timing based approach
import signal
import numpy as np
# for capturing the stdout
from io import StringIO
# used for testing the code that reads from input
from unittest.mock import patch, mock_open
from pyext import RuntimeModule
from enum import Enum
import traceback
def truncatefn(s, length=300):
assert isinstance(s, str)
if len(s) <= length:
return s
return s[:length // 2] + "...(truncated) ..." + s[-length // 2:]
class CODE_TYPE(Enum):
call_based = 0
standard_input = 1
# stuff for setting up signal timer
class TimeoutException(Exception):
pass
def timeout_handler(signum, frame):
print("alarm went off")
return
# raise TimeoutException # this is an unhandled exception. just return None is OK
signal.signal(signal.SIGALRM, timeout_handler)
# timeout = 6 # seconds
# used to capture stdout as a list
# from https://stackoverflow.com/a/16571630/6416660
# alternative use redirect_stdout() from contextlib
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
# Make closing the StringIO a no-op
self._stringio.close = lambda x: 1
return self
def __exit__(self, *args):
self.append(self._stringio.getvalue())
del self._stringio # free up some memory
sys.stdout = self._stdout
def only_int_check(val):
return isinstance(val, int)
def string_int_check(val):
return isinstance(val, str) and val.isdigit()
def combined_int_check(val):
return only_int_check(val) or string_int_check(val)
def clean_traceback(error_traceback):
file_start = error_traceback.find('File \"\"')
# print(file_start)
error_traceback = "Traceback (most recent call last):\n " + error_traceback[file_start:]
return error_traceback
def run_test(in_outs, test=None, debug=False, timeout=15):
"""
if test(generated_code) is not None it'll try to run the code.
otherwise it'll just return an input and output pair.
"""
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
if debug:
print(f"start = {datetime.now().time()}")
if in_outs:
if in_outs.get("fn_name") is None:
which_type = CODE_TYPE.standard_input # Standard input
method_name = None
else:
which_type = CODE_TYPE.call_based # Call-based
method_name = in_outs["fn_name"]
if debug:
print(f"loaded input_output = {datetime.now().time()}")
if test is None:
assert False, "should not happen: test code is none"
return in_outs, {"error": "no test code provided"}
elif test is not None:
results = []
sol = "from string import *\nfrom re import *\nfrom datetime import *\nfrom collections import *\nfrom heapq import *\nfrom bisect import *\nfrom copy import *\nfrom math import *\nfrom random import *\nfrom statistics import *\nfrom itertools import *\nfrom functools import *\nfrom operator import *\nfrom io import *\nfrom sys import *\nfrom json import *\nfrom builtins import *\nfrom typing import *\nimport string\nimport re\nimport datetime\nimport collections\nimport heapq\nimport bisect\nimport copy\nimport math\nimport random\nimport statistics\nimport itertools\nimport functools\nimport operator\nimport io\nimport sys\nimport json\nsys.setrecursionlimit(6*10**5)\n"
if debug:
print(f"loading test code = {datetime.now().time()}")
if which_type == CODE_TYPE.call_based:
sol += test
if debug:
print(f"sol = {sol}")
signal.alarm(timeout)
try:
tmp_sol = RuntimeModule.from_string("tmp_sol", "", sol)
if "class Solution" not in test:
tmp = tmp_sol
else:
tmp = tmp_sol.Solution()
signal.alarm(0)
except Exception as e:
signal.alarm(0)
error_traceback = traceback.format_exc()
if debug:
print(f"type 0 compilation error = {e}")
results.append(-2)
return results, {
"error": repr(e),
# "error_code": -1,
# "error_message": "Compilation Error",
"traceback": clean_traceback(error_traceback),
}
signal.alarm(0)
elif which_type == CODE_TYPE.standard_input:
# sol
# if code has if __name__ == "__main__": then remove it
try:
astree = ast.parse(test)
last_block = astree.body[-1]
if isinstance(last_block, ast.If):
condition = last_block.test
if ast.unparse(condition).strip() == "__name__ == '__main__'":
test = (ast.unparse(astree.body[:-1]) + "\n" + ast.unparse(last_block.body))
except:
pass
tmp_test = test.split("\n")
new_test = []
for x in tmp_test:
if (not x.startswith("from ")) and (not x.startswith("import ")):
new_test.append("\t" + x + "\n")
else:
new_test.append(x + "\n")
tmp_test = new_test
new_test = ""
started = False
for i in tmp_test:
if i.startswith("\t") and not started:
new_test += "stdin = sys.stdin\nstdout = sys.stdout\n"
new_test += "def code():\n"
new_test += i
started = True
elif started and ((i.startswith("from ")) or (i.startswith("import "))):
new_test += "\t" + i
else:
new_test += i
tmp_test = new_test
sol += tmp_test
if debug:
print(f"sol = {sol}")
method_name = "code"
signal.alarm(timeout)
try:
tmp_sol = RuntimeModule.from_string("tmp_sol", "", sol)
tmp = tmp_sol
signal.alarm(0)
except Exception as e:
signal.alarm(0)
error_traceback = traceback.format_exc()
if debug:
print(f"type 1 compilation error = {e}")
results.append(-2)
return results, {
"error": repr(e),
# "error_code": -1,
# "error_message": "Compilation Error",
"traceback": clean_traceback(error_traceback),
}
signal.alarm(0)
if debug:
print(f"get method = {datetime.now().time()}")
try:
method = getattr(tmp, method_name) # get_attr second arg must be str
except:
signal.alarm(0)
error_traceback = traceback.format_exc()
e = sys.exc_info()
print(f"unable to get function error = {e}")
results.append(-2)
return results, {
"error": repr(e),
# "error_code": -1,
# "error_message": "Unable to extract code",
"traceback": clean_traceback(error_traceback),
}
for index, inputs in enumerate(in_outs["inputs"]):
raw_inputs = inputs
raw_outputs = in_outs["outputs"][index]
if which_type == CODE_TYPE.call_based:
inputs = [json.loads(line) for line in inputs.split("\n")]
in_outs["outputs"][index] = json.loads(in_outs["outputs"][index])
truncate_line_size = 300 // (raw_inputs.count("\n") + 1)
raw_inputs = "\n".join(
[truncatefn(line, truncate_line_size) for line in raw_inputs.strip().split("\n")])
raw_outputs = truncatefn(raw_outputs, 200)
else:
raw_inputs = truncatefn(raw_inputs)
raw_outputs = truncatefn(raw_outputs, 200)
# JSON forces dictionaries to have string keys; this undoes this (assuming a singleton list)
try:
if isinstance(inputs[0], dict):
inputs = [{int(k): v for k, v in inputs[0].items()}]
except:
True
try:
if isinstance(in_outs["outputs"][index], dict):
in_outs["outputs"][index] = [{int(k): v for k, v in in_outs["outputs"][index].items()}]
except:
True
try:
if isinstance(in_outs["outputs"][index][0], dict):
in_outs["outputs"][index] = [{int(k): v for k, v in in_outs["outputs"][index][0].items()}]
except:
True
if debug:
print(
f"time: {datetime.now().time()} testing index = {index} inputs = {inputs}, {type(inputs)}. type = {which_type}"
)
if which_type == CODE_TYPE.call_based: # Call-based
signal.alarm(timeout)
faulthandler.enable()
try:
output = method(*inputs)
raw_true_output = output
raw_true_output_copy = json.dumps(output)
raw_true_output_copy = truncatefn(raw_true_output_copy, 200)
# ground truth sequences are not tuples
if isinstance(output, tuple):
output = list(output)
tmp_result = output == in_outs["outputs"][index]
if (isinstance(in_outs["outputs"][index], list) and in_outs["outputs"][index]):
tmp_result = tmp_result or (output == in_outs["outputs"][index][0])
# ground truth sequences are not tuples
try:
if isinstance(output[0], tuple):
tmp_result = tmp_result or ([list(x) for x in output] == in_outs["outputs"][index][0])
except:
True
results.append(tmp_result)
if tmp_result != True:
return results, {
"output": raw_true_output_copy,
"expected": raw_outputs,
"inputs": raw_inputs,
# "error_code": -2,
"error_message": "Wrong Answer",
}
# reset the alarm
signal.alarm(0)
except Exception as e:
signal.alarm(0)
error_traceback = traceback.format_exc()
faulthandler.disable()
if debug:
print(f"Standard input runtime error or time limit exceeded error = {e}")
results.append(-1)
if "timeoutexception" in repr(e).lower():
return results, {
"error": repr(e),
# "error_code": -3,
# "error_message": "Time Limit Exceeded",
# "inputs": raw_inputs,
# "expected": raw_outputs,
"traceback": clean_traceback(error_traceback),
}
else:
return results, {
"error": repr(e),
# "error_code": -4,
# "error_message": "Runtime Error",
# "inputs": raw_inputs,
# "expected": raw_outputs,
"traceback": clean_traceback(error_traceback),
}
faulthandler.disable()
signal.alarm(0)
if debug:
print(
f"outputs = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"
)
elif which_type == CODE_TYPE.standard_input: # Standard input
faulthandler.enable()
passed = False
if isinstance(inputs, list):
inputs = "\n".join(inputs)
if isinstance(in_outs["outputs"][index], list):
in_outs["outputs"][index] = "\n".join(in_outs["outputs"][index])
signal.alarm(timeout)
with Capturing() as output:
try:
call_method(method, inputs)
# reset the alarm
signal.alarm(0)
passed = True
except Exception as e:
# runtime error or took too long
signal.alarm(0)
error_traceback = traceback.format_exc()
print(f"Call-based runtime error or time limit exceeded error = {repr(e)}{e}")
results.append(-1)
if "timeoutexception" in repr(e).lower():
return results, {
"error": repr(e),
# "error_code": -3,
# "error_message": "Time Limit Exceeded",
# "inputs": raw_inputs,
# "expected": raw_outputs,
"traceback": clean_traceback(error_traceback),
}
else:
return results, {
"error": repr(e),
# "error_code": -4,
# "error_message": "Runtime Error",
# "inputs": raw_inputs,
# "expected": raw_outputs,
"traceback": clean_traceback(error_traceback),
}
signal.alarm(0)
raw_true_output = output[0]
raw_true_output_copy = truncatefn(raw_true_output, 200)
output = raw_true_output.splitlines()
if not passed:
if debug:
nl = "\n"
if not isinstance(inputs, list):
print(
f"not passed output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"
)
else:
print(
f"not passed output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"
)
continue
if passed and debug:
print(f"==> output = {output}, test outputs = {in_outs['outputs'][index]}")
if custom_compare_(output, in_outs["outputs"][index]):
tmp_result = True
results.append(tmp_result)
continue
# ground truth sequences are expressed as lists not tuples
if isinstance(output, tuple):
output = list(output)
tmp_result = False
try:
tmp_result = output == [in_outs["outputs"][index]]
if isinstance(in_outs["outputs"][index], list):
tmp_result = tmp_result or (output == in_outs["outputs"][index])
if isinstance(output[0], str):
tmp_result = tmp_result or ([e.strip() for e in output] == in_outs["outputs"][index])
except Exception as e:
if debug:
print(f"Failed check1 exception = {e}")
pass
if tmp_result == True:
results.append(tmp_result)
continue
# try one more time without \n
if isinstance(in_outs["outputs"][index], list):
for tmp_index, i in enumerate(in_outs["outputs"][index]):
in_outs["outputs"][index][tmp_index] = i.split("\n")
in_outs["outputs"][index][tmp_index] = [
x.strip() for x in in_outs["outputs"][index][tmp_index] if x
]
else:
in_outs["outputs"][index] = in_outs["outputs"][index].split("\n")
in_outs["outputs"][index] = list(filter(len, in_outs["outputs"][index]))
in_outs["outputs"][index] = list(map(lambda x: x.strip(), in_outs["outputs"][index]))
try:
tmp_result = output == [in_outs["outputs"][index]]
if isinstance(in_outs["outputs"][index], list):
tmp_result = tmp_result or (output == in_outs["outputs"][index])
except Exception as e:
if debug:
print(f"Failed check2 exception = {e}")
pass
if tmp_result == True:
results.append(tmp_result)
continue
# try by converting the output into a split up list too
if isinstance(output, list):
output = list(filter(len, output))
if debug:
nl = "\n"
if not isinstance(inputs, list):
print(
f"@1 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]} {tmp_result=}"
)
else:
print(
f"@1 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]} {tmp_result=}"
)
if tmp_result == True:
results.append(tmp_result)
continue
if debug:
print(f"{tmp_result=} @a")
try:
tmp_result = output == [in_outs["outputs"][index]]
if isinstance(in_outs["outputs"][index], list):
tmp_result = tmp_result or (output == in_outs["outputs"][index])
except Exception as e:
if debug:
print(f"Failed check3 exception = {e}")
pass
if debug:
print(f"{tmp_result=} @b")
try:
all_ints = all(
combined_int_check(e1) and combined_int_check(e2)
for e1, e2 in zip(output, in_outs["outputs"][index]))
if not all_ints:
if debug:
print([
combined_int_check(e1) and combined_int_check(e2)
for e1, e2 in zip(output, in_outs["outputs"][index])
])
output_float = [float(e) for e in output]
gt_float = [float(e) for e in in_outs["outputs"][index]]
tmp_result = tmp_result or ((len(output_float) == len(gt_float)) and
np.allclose(output_float, gt_float))
except Exception as e:
pass
if debug:
print(f"{tmp_result=} @c")
try:
if isinstance(output[0], list):
all_ints = all(
combined_int_check(e1) and combined_int_check(e2)
for e1, e2 in zip(output[0], in_outs["outputs"][index]))
if not all_ints:
output_float = [float(e) for e in output[0]]
gt_float = [float(e) for e in in_outs["outputs"][index][0]]
tmp_result = tmp_result or ((len(output_float) == len(gt_float)) and
np.allclose(output_float, gt_float))
except Exception as e:
pass
if tmp_result == True:
results.append(tmp_result)
continue
if debug:
print(f"{tmp_result=} @d")
# try by converting the stuff into split up list
if isinstance(in_outs["outputs"][index], list):
for tmp_index, i in enumerate(in_outs["outputs"][index]):
in_outs["outputs"][index][tmp_index] = set(i.split())
else:
in_outs["outputs"][index] = set(in_outs["outputs"][index].split())
if debug:
print(f"{tmp_result=} @e")
try:
tmp_result = output == in_outs["outputs"][index]
except Exception as e:
if debug:
print(f"Failed check4 exception = {e}")
continue
if tmp_result == True:
results.append(tmp_result)
continue
if debug:
print(f"{tmp_result=} @f")
# try by converting the output into a split up list too
if isinstance(output, list):
for tmp_index, i in enumerate(output):
output[tmp_index] = i.split()
output = list(filter(len, output))
for tmp_index, i in enumerate(output):
output[tmp_index] = set(i)
else:
output = output.split()
output = list(filter(len, output))
output = set(output)
if debug:
print(f"{tmp_result=} @g")
if tmp_result == True and debug:
print("PASSED")
results.append(tmp_result)
if tmp_result != True:
return results, {
"output": raw_true_output_copy,
"expected": raw_outputs,
"inputs": raw_inputs,
# "error_code": -2,
"error_message": "Wrong Answer",
}
if debug:
nl = "\n"
if not isinstance(inputs, list):
print(
f"@2 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs.replace(nl,' new-line ')}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"
)
else:
print(
f"@2 output = {output}, test outputs = {in_outs['outputs'][index]}, inputs = {inputs}, {type(inputs)}, {output == [in_outs['outputs'][index]]}"
)
print(f"results = {results}")
return results, {}
def custom_compare_(output, ground_truth):
if isinstance(output, list):
output_1 = "\n".join(output)
if stripped_string_compare(output_1, ground_truth):
return True
if isinstance(output, list):
output_2 = [o.lstrip().rstrip() for o in output]
output_2 = "\n".join(output_2)
if stripped_string_compare(output_2, ground_truth):
return True
return False
def stripped_string_compare(s1, s2):
s1 = s1.lstrip().rstrip()
s2 = s2.lstrip().rstrip()
return s1 == s2
def call_method(method, inputs):
if isinstance(inputs, list):
inputs = "\n".join(inputs)
inputs_line_iterator = iter(inputs.split("\n"))
# sys.setrecursionlimit(10000)
# @patch('builtins.input', side_effect=inputs.split("\n"))
@patch("builtins.open", mock_open(read_data=inputs))
@patch("sys.stdin", StringIO(inputs))
@patch("sys.stdin.readline", lambda *args: next(inputs_line_iterator))
@patch("sys.stdin.readlines", lambda *args: inputs.split("\n"))
@patch("sys.stdin.read", lambda *args: inputs)
# @patch('sys.stdout.write', print)
def _inner_call_method(_method):
try:
return _method()
except SystemExit as e:
pass
finally:
pass
return _inner_call_method(method)
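# Illustrative sketch: the patches above redirect every standard-input channel of
# the tested program to the provided `inputs` string, so code that calls input(),
# sys.stdin.readline(), or sys.stdin.read() consumes the test case instead of
# blocking. For a hypothetical zero-argument solution():
#
#     def solution():
#         a, b = sys.stdin.readline().split()
#         print(int(a) + int(b))
#
#     call_method(solution, "1 2\n")  # the solution reads "1 2" as if from stdin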
def reliability_guard(maximum_memory_bytes=None):
"""
This disables various destructive functions and prevents the generated code
from interfering with the test (e.g. fork bomb, killing other processes,
removing filesystem files, etc.)
WARNING
    This function is NOT a security sandbox. Untrusted code, including model-
generated code, should not be blindly executed outside of one. See the
Codex paper for more information about OpenAI's code sandbox, and proceed
with caution.
"""
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))
faulthandler.disable()
import builtins
builtins.exit = None
builtins.quit = None
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.kill = None
    os.system = None  # prevent interference with the REPL-based evaluation
os.putenv = None
os.remove = None
os.removedirs = None
os.rmdir = None
os.fchdir = None
os.setuid = None
os.fork = None
os.forkpty = None
os.killpg = None
os.rename = None
os.renames = None
os.truncate = None
os.replace = None
os.unlink = None
os.fchmod = None
os.fchown = None
os.chmod = None
os.chown = None
os.chroot = None
os.lchflags = None
os.lchmod = None
os.lchown = None
os.getcwd = None
os.chdir = None
import shutil
shutil.rmtree = None
shutil.move = None
shutil.chown = None
import subprocess
subprocess.Popen = None # type: ignore
__builtins__["help"] = None
import sys
sys.modules["ipdb"] = None
sys.modules["joblib"] = None
sys.modules["resource"] = None
sys.modules["psutil"] = None
sys.modules["tkinter"] = None
================================================
FILE: verl/utils/reward_score/prime_code/utils.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Borrowed from: https://huggingface.co/spaces/codeparrot/apps_metric/blob/main/utils.py
import multiprocessing
from typing import Dict, Optional
from datasets import load_dataset
from .testing_util import run_test
import traceback
import os, sys
def _temp_run(sample, generation, debug, result, metadata_list, timeout):
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
try:
res, metadata = run_test(in_outs=sample, test=generation, debug=debug, timeout=timeout)
result.append(res)
metadata_list.append(metadata)
except Exception as e:
# print(e) # some tracebacks are extremely long.
traceback.print_exc(10)
result.append([-1 for i in range(len(sample['inputs']))])
metadata_list.append({})
def check_correctness(in_outs: Optional[dict], generation, timeout=10, debug=True):
"""Check correctness of code generation with a global timeout.
The global timeout is to catch some extreme/rare cases not handled by the timeouts
inside `run_test`"""
manager = multiprocessing.Manager()
result = manager.list()
metadata_list = manager.list()
p = multiprocessing.Process(target=_temp_run, args=(in_outs, generation, debug, result, metadata_list, timeout))
p.start()
p.join(timeout=timeout + 1)
if p.is_alive():
p.kill()
# p.terminate()
if not result:
# consider that all tests failed
result = [[-1 for i in range(len(in_outs["inputs"]))]]
if debug:
print(f"global timeout")
return result[0], metadata_list
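# Illustrative usage sketch; the sample dict shape mirrors the in_outs argument
# consumed by run_test via _temp_run above:
#
#     sample = {"inputs": ["1 2\n"], "outputs": ["3\n"]}
#     generation = "a, b = map(int, input().split())\nprint(a + b)"
#     results, metadata = check_correctness(sample, generation, timeout=10, debug=False)
#     # results[i] should be truthy when the i-th test case passed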
================================================
FILE: verl/utils/reward_score/prime_math/__init__.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Answer checker API that uses sympy to simplify expressions and check for equality.
Call grade_answer(given_answer: str, ground_truth: str).
FROM: https://github.com/openai/prm800k/blob/main/prm800k/grading/grader.py
"""
import re
import sympy
from pylatexenc import latex2text
from sympy.parsing import sympy_parser
from . import math_normalize
from .grader import math_equal
# import math_normalize
# from grader import math_equal
# sympy might hang -- we don't care about trying to be lenient in these cases
BAD_SUBSTRINGS = ["^{", "^("]
BAD_REGEXES = ["\^[0-9]+\^", "\^[0-9][0-9]+"]
TUPLE_CHARS = "()[]"
def _sympy_parse(expr: str):
"""Parses an expression with sympy."""
py_expr = expr.replace("^", "**")
return sympy_parser.parse_expr(
py_expr,
transformations=(sympy_parser.standard_transformations + (sympy_parser.implicit_multiplication_application,)),
)
def _parse_latex(expr: str) -> str:
"""Attempts to parse latex to an expression sympy can read."""
expr = expr.replace("\\tfrac", "\\frac")
expr = expr.replace("\\dfrac", "\\frac")
expr = expr.replace("\\frac", " \\frac") # Play nice with mixed numbers.
expr = latex2text.LatexNodes2Text().latex_to_text(expr)
# Replace the specific characters that this parser uses.
expr = expr.replace("√", "sqrt")
expr = expr.replace("π", "pi")
expr = expr.replace("∞", "inf")
expr = expr.replace("∪", "U")
expr = expr.replace("·", "*")
expr = expr.replace("×", "*")
return expr.strip()
def _is_float(num: str) -> bool:
try:
float(num)
return True
except ValueError:
return False
def _is_int(x: float) -> bool:
try:
return abs(x - int(round(x))) <= 1e-7
except:
return False
def _is_frac(expr: str) -> bool:
return bool(re.search(r"^-?[0-9]+.?/0*[1-9][0-9]*.?$", expr))
def _str_is_int(x: str) -> bool:
try:
x = _strip_properly_formatted_commas(x)
x = float(x)
return abs(x - int(round(x))) <= 1e-7
except:
return False
def _str_to_int(x: str) -> int:
x = x.replace(",", "")
x = float(x)
return int(x)
def _inject_implicit_mixed_number(step: str):
"""
Automatically make a mixed number evalable
e.g. 7 3/4 => 7+3/4
"""
p1 = re.compile("([0-9]) +([0-9])")
    step = p1.sub("\\1+\\2", step)  # e.g. "7 3/4" -> "7+3/4"
return step
def _strip_properly_formatted_commas(expr: str):
# We want to be careful because we don't want to strip tuple commas
p1 = re.compile("(\d)(,)(\d\d\d)($|\D)")
while True:
next_expr = p1.sub("\\1\\3\\4", expr)
if next_expr == expr:
break
expr = next_expr
return next_expr
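# Illustrative examples: the loop repeatedly removes thousands separators while
# leaving tuple commas (no 3-digit group after the comma) intact:
#     _strip_properly_formatted_commas("1,000,000")  # -> "1000000"
#     _strip_properly_formatted_commas("(1, 2)")     # -> "(1, 2)"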
def _normalize(expr: str) -> str:
"""Normalize answer expressions."""
if expr is None:
return None
# Remove enclosing `\text{}`.
    m = re.search("^\\\\text\{(?P<text>.+?)\}$", expr)
if m is not None:
expr = m.group("text")
expr = expr.replace("\\%", "%")
expr = expr.replace("\\$", "$")
expr = expr.replace("$", "")
expr = expr.replace("%", "")
expr = expr.replace(" or ", " , ")
expr = expr.replace(" and ", " , ")
expr = expr.replace("million", "*10^6")
expr = expr.replace("billion", "*10^9")
expr = expr.replace("trillion", "*10^12")
for unit in [
"degree",
"cm",
"centimeter",
"meter",
"mile",
"second",
"minute",
"hour",
"day",
"week",
"month",
"year",
"foot",
"feet",
"inch",
"yard",
"liter",
]:
expr = re.sub(f"{unit}(es)?(s)? *(\^[0-9]+)?", "", expr)
expr = re.sub(f"\^ *\\\\circ", "", expr)
if len(expr) > 0 and expr[0] == "{" and expr[-1] == "}":
expr = expr[1:-1]
expr = re.sub(",\\\\! *", "", expr)
if _is_float(expr) and _is_int(float(expr)):
expr = str(int(round(float(expr))))
if "\\" in expr:
try:
expr = _parse_latex(expr)
except:
pass
# edge case with mixed numbers and negative signs
expr = re.sub("- *", "-", expr)
expr = _inject_implicit_mixed_number(expr)
# don't be case sensitive for text answers
expr = expr.lower()
if _str_is_int(expr):
expr = str(_str_to_int(expr))
return expr
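# Illustrative examples, traced by hand from the rules above (treat as approximate):
#     _normalize("10,000 feet")      # -> "10000"    (units and commas stripped)
#     _normalize("\\text{western}")  # -> "western"  (\text{} unwrapped, lowercased)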
def count_unknown_letters_in_expr(expr: str):
expr = expr.replace("sqrt", "")
expr = expr.replace("frac", "")
letters_in_expr = set([x for x in expr if x.isalpha()])
return len(letters_in_expr)
def should_allow_eval(expr: str):
# we don't want to try parsing unknown text or functions of more than two variables
if count_unknown_letters_in_expr(expr) > 2:
return False
for bad_string in BAD_SUBSTRINGS:
if bad_string in expr:
return False
for bad_regex in BAD_REGEXES:
if re.search(bad_regex, expr) is not None:
return False
return True
def are_equal_under_sympy(ground_truth_normalized: str, given_normalized: str):
are_equal = False
try:
expr = f"({ground_truth_normalized})-({given_normalized})"
if should_allow_eval(expr):
sympy_diff = _sympy_parse(expr)
simplified = sympy.simplify(sympy_diff)
if simplified == 0:
are_equal = True
except:
pass
return are_equal
def split_tuple(expr: str):
"""
Split the elements in a tuple/interval, while handling well-formatted commas in large numbers
"""
expr = _strip_properly_formatted_commas(expr)
if len(expr) == 0:
return []
if (len(expr) > 2 and expr[0] in TUPLE_CHARS and expr[-1] in TUPLE_CHARS and
all([ch not in expr[1:-1] for ch in TUPLE_CHARS])):
elems = [elem.strip() for elem in expr[1:-1].split(",")]
else:
elems = [expr]
return elems
def grade_answer(given_answer: str, ground_truth: str) -> bool:
"""
The answer will be considered correct if:
(a) it normalizes to the same string as the ground truth answer
OR
(b) sympy can simplify the difference between the expressions to 0
"""
if given_answer is None:
return False
ground_truth_normalized_mathd = math_normalize.normalize_answer(ground_truth)
given_answer_normalized_mathd = math_normalize.normalize_answer(given_answer)
# be at least as lenient as mathd
if ground_truth_normalized_mathd == given_answer_normalized_mathd:
return True
ground_truth_normalized = _normalize(ground_truth)
given_normalized = _normalize(given_answer)
if ground_truth_normalized is None:
return False
if ground_truth_normalized == given_normalized:
return True
if len(given_normalized) == 0:
return False
ground_truth_elems = split_tuple(ground_truth_normalized)
given_elems = split_tuple(given_normalized)
if len(ground_truth_elems) > 1 and (ground_truth_normalized[0] != given_normalized[0] or
ground_truth_normalized[-1] != given_normalized[-1]):
is_correct = False
elif len(ground_truth_elems) != len(given_elems):
is_correct = False
else:
for ground_truth_elem, given_elem in zip(ground_truth_elems, given_elems):
if _is_frac(ground_truth_elem) and _is_frac(given_elem):
# if fractions aren't reduced, then shouldn't be marked as correct
# so, we don't want to allow sympy.simplify in this case
is_correct = ground_truth_elem == given_elem
elif _str_is_int(ground_truth_elem) != _str_is_int(given_elem):
# if the ground truth answer is an integer, we require the given answer to be a strict match (no sympy.simplify)
is_correct = False
else:
is_correct = are_equal_under_sympy(ground_truth_elem, given_elem)
if not is_correct:
break
return is_correct
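# Illustrative usage sketch, following the two rules documented above:
#     grade_answer("\\frac{1}{2}", "0.5")  # -> True  (both normalize to \frac{1}{2})
#     grade_answer("3,000", "3000")        # -> True  (commas stripped)
#     grade_answer("1/3", "2/6")           # -> False (unreduced fraction, strict match)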
def remove_boxed(s):
left = "\\boxed{"
try:
assert s[:len(left)] == left
assert s[-1] == "}"
return s[len(left):-1]
except:
return None
def _last_boxed_only_string(string):
idx = string.rfind("\\boxed")
if idx < 0:
idx = string.rfind("\\fbox")
if idx < 0:
return None
i = idx
left_brace_idx = None
right_brace_idx = None
num_left_braces_open = 0
while i < len(string):
if string[i] == "{":
num_left_braces_open += 1
if left_brace_idx is None:
left_brace_idx = i
elif string[i] == "}":
num_left_braces_open -= 1
if num_left_braces_open == 0:
right_brace_idx = i
break
i += 1
if left_brace_idx is None or right_brace_idx is None:
return None
return string[left_brace_idx + 1:right_brace_idx].strip()
def match_answer(response):
is_matched = False
for ans_marker in ['answer:', "answer is", "answers are"]:
ans_idx = response.lower().rfind(ans_marker)
if ans_idx != -1:
is_matched = True
response = response[ans_idx + len(ans_marker):].strip()
if response.endswith("\n"):
response = response[:-2]
for ans_marker in ["is answer", "is the answer", "are answers", "are the answers"]:
ans_idx = response.lower().rfind(ans_marker)
if ans_idx != -1:
is_matched = True
response = response[:ans_idx].strip()
if response.endswith("\n"):
response = response[:-2]
# Find boxed
ans_boxed = _last_boxed_only_string(response)
if ans_boxed:
is_matched = True
response = ans_boxed
if ". " in response:
dot_idx = response.lower().rfind(". ")
if dot_idx != -1:
response = response[:dot_idx].strip()
for ans_marker in ['be ', "is ", "are ", "=", ": ", "get ", 'be\n', "is\n", "are\n", ":\n", "get\n"]:
ans_idx = response.lower().rfind(ans_marker)
if ans_idx != -1:
is_matched = True
response = response[ans_idx + len(ans_marker):].strip()
if response.endswith("\n"):
response = response[:-2]
is_matched = is_matched if any([c.isdigit() for c in response]) else False # answer must have a digit
# Grade
return is_matched, response
import math
def compute_score(model_output: str, ground_truth: str) -> tuple:
model_output = str(model_output)
ground_truth = str(ground_truth)
is_matched, extracted_model_output = match_answer(model_output)
format_correctness = "Step 2:" in model_output and "\\box" in model_output
# grade simple algebra questions. if succeeded, return; otherwise, proceed to more complex grading
if grade_answer(extracted_model_output, ground_truth):
return True, True, extracted_model_output
try:
if "\pi" in extracted_model_output or "\pi" in ground_truth:
equivs = []
for pi in [math.pi, 3.14]:
equivs.append(math_equal(extracted_model_output, ground_truth, timeout=True, pi=pi))
is_correct = any(equivs)
else:
is_correct = math_equal(extracted_model_output, ground_truth, timeout=True)
except:
is_correct = False
return is_correct, format_correctness, extracted_model_output
================================================
FILE: verl/utils/reward_score/prime_math/grader.py
================================================
# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Microsoft Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
# Copyright (c) 2023 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright (c) 2021 Dan Hendrycks
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This logic is largely copied from the Hendrycks' MATH release (math_equivalence), and borrowed from:
- https://github.com/microsoft/ToRA/blob/main/src/eval/grader.py
- https://github.com/microsoft/ProphetNet/tree/master/CRITIC
- https://github.com/openai/prm800k
"""
import contextlib
import re
import signal
import math
from math import isclose
from typing import Union
from sympy import N, simplify
from sympy.parsing.latex import parse_latex
from sympy.parsing.sympy_parser import parse_expr
def is_digit(s):
try:
if "{,}" in str(s):
num = float(str(s).replace("{,}", ""))
return True, num
num = float(str(s).replace(",", ""))
return True, num
except ValueError:
return False, None
def normalize(answer, pi) -> str:
    # if the answer is a dollar amount (e.g. "$12"), strip the leading $ for comparison
if isinstance(answer, str) and bool(re.match(r'\$\d+(\.\d+)?', answer)):
return answer[1:]
# checking if answer is % or \\% and removing %
if isinstance(answer, str) and (bool(re.match(r'^\d+(\.\d+)?%$', answer)) or
bool(re.match(r'^\d+(\.\d+)?\\%$', answer))):
return answer.replace("\\%", "").replace("%", "")
# handle base
answer = handle_base(answer)
# handle pi
answer = handle_pi(answer, pi)
return answer
def handle_base(x) -> str:
if isinstance(x, str) and "_" in x:
# Due to base
x = x.split("_")[0]
x = float(x)
return int(x)
return x
def handle_pi(string, pi):
if isinstance(string, str) and "\pi" in string:
# Find the first occurrence of "\pi"
idx = string.find("\pi")
# Iterate over the string and find all occurrences of "\pi" with a valid previous character
while idx != -1:
if idx > 0 and string[idx - 1].isdigit():
# Replace "\pi" with "*math.pi" if the previous character is a digit
string = string[:idx] + f"*{pi}" + string[idx + 3:]
else:
# Replace "\pi" with "1*math.pi" if the previous character is not a digit
string = string[:idx] + f"1*{pi}" + string[idx + 3:]
# Find the next occurrence of "\pi"
idx = string.find("\pi", idx + 1)
# Evaluate the expression using eval() function
try:
string = eval(string)
except:
pass
return string
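# Illustrative example: with pi = 3.14 the loop rewrites "2\pi" to the evaluable
# string "2*3.14", which the eval() call then reduces to a float:
#     handle_pi("2\\pi", 3.14)  # -> 6.28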
def math_equal(prediction: Union[bool, float, str],
reference: Union[float, str],
include_percentage: bool = True,
tolerance: float = 1e-4,
timeout: float = 10.0,
pi: float = math.pi) -> bool:
"""
Exact match of math if and only if:
1. numerical equal: both can convert to float and are equal
2. symbolic equal: both can convert to sympy expression and are equal
"""
prediction = normalize(prediction, pi)
reference = normalize(reference, pi)
if isinstance(prediction, str) and len(prediction) > 1000: # handling weird corner-cases
prediction = prediction[:1000]
# 0. string comparison
if isinstance(prediction, str) and isinstance(reference, str):
if prediction.strip().lower() == reference.strip().lower():
return True
if prediction.replace(" ", "") == reference.replace(" ", ""):
return True
try: # 1. numerical equal
if is_digit(prediction)[0] and is_digit(reference)[0]:
prediction = is_digit(prediction)[1]
reference = is_digit(reference)[1]
# number questions
if include_percentage:
gt_result = [reference / 100, reference, reference * 100]
else:
gt_result = [reference]
for item in gt_result:
try:
if isclose(item, prediction, rel_tol=tolerance):
return True
except Exception:
continue
return False
except Exception:
pass
if not prediction and prediction not in [0, False]:
return False
# 2. symbolic equal
reference = str(reference).strip()
prediction = str(prediction).strip()
## deal with [], (), {}
prediction = format_intervals(prediction)
pred_str, ref_str = prediction, reference
if (prediction.startswith("[") and prediction.endswith("]") and
not reference.startswith("(")) or (prediction.startswith("(") and prediction.endswith(")") and
not reference.startswith("[")):
pred_str = pred_str.strip("[]()")
ref_str = ref_str.strip("[]()")
for s in ["{", "}", "(", ")"]:
ref_str = ref_str.replace(s, "")
pred_str = pred_str.replace(s, "")
if pred_str == ref_str:
return True
## [a, b] vs. [c, d], return a==c and b==d
if (prediction and reference and prediction[0] in "([" and prediction[-1] in ")]" and
prediction[0] == reference[0] and prediction[-1] == reference[-1]):
pred_parts = prediction[1:-1].split(",")
ref_parts = reference[1:-1].split(",")
if len(pred_parts) == len(ref_parts):
if all([
math_equal(pred_pt, ref_pt, include_percentage, tolerance)
for pred_pt, ref_pt in zip(pred_parts, ref_parts)
]):
return True
if "," in prediction and "," in reference:
pred_parts = [item.strip() for item in prediction.split(",")]
ref_parts = [item.strip() for item in reference.split(",")]
if len(pred_parts) == len(ref_parts):
if all([
math_equal(pred_parts[i], ref_parts[i], include_percentage, tolerance)
for i in range(len(pred_parts))
]):
return True
else:
return False
# if we have point == tuple of values
if prediction.startswith("Point") and reference[0] == "(" and reference[-1] == ")":
pred_parts = prediction[prediction.find("(") + 1:-1].split(",")
ref_parts = reference[1:-1].split(",")
if len(pred_parts) == len(ref_parts):
if all([
math_equal(pred_pt, ref_pt, include_percentage, tolerance)
for pred_pt, ref_pt in zip(pred_parts, ref_parts)
]):
return True
# if reference is a matrix
if "\begin{pmatrix}" in reference and prediction.startswith("Matrix"):
try:
pred_matrix = parse_expr(prediction)
ref_matrix_items = reference.split()[1:-1:2]
if len(pred_matrix) == len(ref_matrix_items):
if all([
math_equal(pred, ref, include_percentage, tolerance)
for ref, pred in zip(ref_matrix_items, pred_matrix)
]):
return True
except Exception:
pass
elif "\begin{pmatrix}" in reference and prediction.startswith("[") and prediction.endswith("]"):
if isinstance(eval(prediction), list):
try:
pred_matrix = eval(prediction)
# ref_matrix_items = reference.split()[1:-1:2]
ref_matrix_items = reference.lstrip("\\begin{pmatrix}").lstrip("\begin{pmatrix}").rstrip(
"\\end{pmatrix}").rstrip("\end{pmatrix}")
ref_matrix_items = ref_matrix_items.split("\\")
ref_matrix_items = [row.split("&") if "&" in row else row for row in ref_matrix_items]
if len(pred_matrix) == len(ref_matrix_items):
if all([
math_equal(pred, ref, include_percentage, tolerance)
for ref, pred in zip(ref_matrix_items, pred_matrix)
]):
return True
except Exception:
pass
return symbolic_equal(prediction, reference, tolerance, timeout)
def symbolic_equal(a, b, tolerance, timeout=10.0):
def _parse(s):
for f in [parse_expr, parse_latex]:
try:
with time_limit(timeout):
return f(s)
except Exception:
pass
return s
a = _parse(a)
b = _parse(b)
try:
with time_limit(timeout):
if simplify(a - b) == 0:
return True
except Exception:
pass
try:
with time_limit(timeout):
if isclose(N(a), N(b), rel_tol=tolerance):
return True
except Exception:
pass
return False
class TimeoutException(Exception):
pass
@contextlib.contextmanager
def time_limit(seconds: float):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.setitimer(signal.ITIMER_REAL, seconds)
signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
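# Note: time_limit relies on SIGALRM via signal.setitimer, so it only works on
# POSIX systems and only in the main thread; the finally block clears the timer
# when the protected region exits.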
def format_intervals(prediction):
patterns = {
"Interval(": r"^Interval\((.*)\)$",
"Interval.Ropen(": r"^Interval\.Ropen\((.*)\)$",
"Interval.Lopen(": r"^Interval\.Lopen\((.*)\)$",
"Interval.open(": r"^Interval\.open\((.*)\)$",
}
for key, pattern in patterns.items():
match = re.match(pattern, prediction)
if match:
inner_content = match.group(1)
if key == "Interval(": # Intarval(a, b) == [a, b]
return f"[{inner_content}]"
elif key == "Interval.Ropen(": # Intarval.Ropen(a, b) == [a, b)
return f"[{inner_content})"
elif key == "Interval.Lopen(": # Intarval.Lopen(a, b) == (a, b]
return f"({inner_content}]"
elif key == "Interval.open(": # Intarval.open(a, b) == (a, b)
return f"({inner_content})"
return prediction
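# Illustrative usage sketch (exact results depend on sympy's parsers):
#     math_equal("0.5", "1/2")                 # -> True, numerically equal
#     math_equal("(1, 2)", "(1.0, 2.0)")       # -> True, element-wise comparison
#     math_equal("Interval(0, 1)", "[0, 1]")   # -> True, via format_intervals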
================================================
FILE: verl/utils/reward_score/prime_math/math_normalize.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2021 Dan Hendrycks
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This logic is largely copied from the Hendrycks' MATH release (math_equivalence).
From: https://github.com/openai/prm800k/blob/main/prm800k/grading/math_normalize.py
"""
import re
from typing import Optional
def normalize_answer(answer: Optional[str]) -> Optional[str]:
if answer is None:
return None
answer = answer.strip()
try:
# Remove enclosing `\text{}`.
        m = re.search("^\\\\text\{(?P<text>.+?)\}$", answer)
if m is not None:
answer = m.group("text").strip()
return _strip_string(answer)
except:
return answer
def _fix_fracs(string):
substrs = string.split("\\frac")
new_str = substrs[0]
if len(substrs) > 1:
substrs = substrs[1:]
for substr in substrs:
new_str += "\\frac"
if substr[0] == "{":
new_str += substr
else:
try:
assert len(substr) >= 2
except:
return string
a = substr[0]
b = substr[1]
if b != "{":
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}{" + b + "}" + post_substr
else:
new_str += "{" + a + "}{" + b + "}"
else:
if len(substr) > 2:
post_substr = substr[2:]
new_str += "{" + a + "}" + b + post_substr
else:
new_str += "{" + a + "}" + b
string = new_str
return string
def _fix_a_slash_b(string):
if len(string.split("/")) != 2:
return string
a = string.split("/")[0]
b = string.split("/")[1]
try:
a = int(a)
b = int(b)
assert string == "{}/{}".format(a, b)
new_string = "\\frac{" + str(a) + "}{" + str(b) + "}"
return new_string
except:
return string
def _remove_right_units(string):
# "\\text{ " only ever occurs (at least in the val set) when describing units
if "\\text{ " in string:
splits = string.split("\\text{ ")
assert len(splits) == 2
return splits[0]
else:
return string
def _fix_sqrt(string):
if "\\sqrt" not in string:
return string
splits = string.split("\\sqrt")
new_string = splits[0]
for split in splits[1:]:
if split[0] != "{":
a = split[0]
new_substr = "\\sqrt{" + a + "}" + split[1:]
else:
new_substr = "\\sqrt" + split
new_string += new_substr
return new_string
def _strip_string(string):
# linebreaks
string = string.replace("\n", "")
# remove inverse spaces
string = string.replace("\\!", "")
# replace \\ with \
string = string.replace("\\\\", "\\")
# replace tfrac and dfrac with frac
string = string.replace("tfrac", "frac")
string = string.replace("dfrac", "frac")
# remove \left and \right
string = string.replace("\\left", "")
string = string.replace("\\right", "")
# Remove circ (degrees)
string = string.replace("^{\\circ}", "")
string = string.replace("^\\circ", "")
# remove dollar signs
string = string.replace("\\$", "")
# remove units (on the right)
string = _remove_right_units(string)
# remove percentage
string = string.replace("\\%", "")
string = string.replace("\%", "")
# " 0." equivalent to " ." and "{0." equivalent to "{." Alternatively, add "0" if "." is the start of the string
string = string.replace(" .", " 0.")
string = string.replace("{.", "{0.")
# if empty, return empty string
if len(string) == 0:
return string
if string[0] == ".":
string = "0" + string
# to consider: get rid of e.g. "k = " or "q = " at beginning
if len(string.split("=")) == 2:
if len(string.split("=")[0]) <= 2:
string = string.split("=")[1]
# fix sqrt3 --> sqrt{3}
string = _fix_sqrt(string)
# remove spaces
string = string.replace(" ", "")
# \frac1b or \frac12 --> \frac{1}{b} and \frac{1}{2}, etc. Even works with \frac1{72} (but not \frac{72}1). Also does a/b --> \\frac{a}{b}
string = _fix_fracs(string)
# manually change 0.5 --> \frac{1}{2}
if string == "0.5":
string = "\\frac{1}{2}"
# NOTE: X/Y changed to \frac{X}{Y} in dataset, but in simple cases fix in case the model output is X/Y
string = _fix_a_slash_b(string)
return string
================================================
FILE: verl/utils/seqlen_balancing.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Tuple, Callable
import heapq
import torch
from torch import distributed as dist
from tensordict import TensorDict
import copy
def karmarkar_karp(seqlen_list: List[int], k_partitions: int, equal_size: bool):
# see: https://en.wikipedia.org/wiki/Largest_differencing_method
class Set:
def __init__(self) -> None:
self.sum = 0
self.items = []
def add(self, idx: int, val: int):
self.items.append((idx, val))
self.sum += val
def merge(self, other):
for idx, val in other.items:
self.items.append((idx, val))
self.sum += val
def __lt__(self, other):
if self.sum != other.sum:
return self.sum < other.sum
if len(self.items) != len(other.items):
return len(self.items) < len(other.items)
return self.items < other.items
class State:
def __init__(self, items: List[Tuple[int, int]], k: int) -> None:
self.k = k
            # sets should always be in decreasing order
self.sets = [Set() for _ in range(k)]
assert len(items) in [1, k], f"{len(items)} not in [1, {k}]"
for i, (idx, seqlen) in enumerate(items):
self.sets[i].add(idx=idx, val=seqlen)
self.sets = sorted(self.sets, reverse=True)
def get_partitions(self):
partitions = []
for i in range(len(self.sets)):
cur_partition = []
for idx, _ in self.sets[i].items:
cur_partition.append(idx)
partitions.append(cur_partition)
return partitions
def merge(self, other):
for i in range(self.k):
self.sets[i].merge(other.sets[self.k - 1 - i])
self.sets = sorted(self.sets, reverse=True)
@property
def spread(self) -> int:
return self.sets[0].sum - self.sets[-1].sum
def __lt__(self, other):
            # min-heap: let the state with the largest spread be popped first;
            # if the spread is the same, pop the state with the largest set first.
if self.spread != other.spread:
return self.spread > other.spread
return self.sets[0] > other.sets[0]
def __repr__(self) -> str:
repr_str = "["
for i in range(self.k):
if i > 0:
repr_str += ","
repr_str += "{"
for j, (_, seqlen) in enumerate(self.sets[i].items):
if j > 0:
repr_str += ","
repr_str += str(seqlen)
repr_str += "}"
repr_str += "]"
return repr_str
sorted_seqlen_list = sorted([(seqlen, i) for i, seqlen in enumerate(seqlen_list)])
states_pq = []
if equal_size:
assert len(seqlen_list) % k_partitions == 0, f"{len(seqlen_list)} % {k_partitions} != 0"
for offset in range(0, len(sorted_seqlen_list), k_partitions):
items = []
for i in range(k_partitions):
seqlen, idx = sorted_seqlen_list[offset + i]
items.append((idx, seqlen))
heapq.heappush(states_pq, State(items=items, k=k_partitions))
else:
for seqlen, idx in sorted_seqlen_list:
heapq.heappush(states_pq, State(items=[(idx, seqlen)], k=k_partitions))
while len(states_pq) > 1:
state0 = heapq.heappop(states_pq)
state1 = heapq.heappop(states_pq)
# merge states
state0.merge(state1)
heapq.heappush(states_pq, state0)
final_state = states_pq[0]
partitions = final_state.get_partitions()
if equal_size:
for i, partition in enumerate(partitions):
assert len(partition) * \
k_partitions == len(seqlen_list), f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
return partitions
def greedy_partition(seqlen_list: List[int], k_partitions: int, equal_size: bool):
bias = sum(seqlen_list) + 1 if equal_size else 0
sorted_seqlen = [(seqlen + bias, i) for i, seqlen in enumerate(seqlen_list)]
partitions = [[] for _ in range(k_partitions)]
partition_sums = [0 for _ in range(k_partitions)]
for seqlen, i in sorted_seqlen:
min_idx = None
for j in range(k_partitions):
if min_idx is None or partition_sums[j] < partition_sums[min_idx]:
min_idx = j
partitions[min_idx].append(i)
partition_sums[min_idx] += seqlen
if equal_size:
for i, partition in enumerate(partitions):
assert len(partition) * \
k_partitions == len(seqlen_list), f"{len(partition)} * {k_partitions} != {len(seqlen_list)}"
return partitions
def get_seqlen_balanced_partitions(seqlen_list: List[int], k_partitions: int, equal_size: bool):
""" get order of seq lengths to make partitions balanced, this is
used in balacing sum of seqlength across dp ranks and microbatches
Parameters:
seqlen_list (List[int]):
seq lengths of each items
k_partitions (int):
resulting number of partitions
equal_size (bool):
if True, number of items in each partitions must be equal.
if False, only consider balancing the sum, each partition can have
variable number of items
Returns:
partitions (List[List[int]]):
return k_partitions list containing the index of items.
"""
assert len(seqlen_list) >= k_partitions, f"number of items:[{len(seqlen_list)}] < k_partitions:[{k_partitions}]"
def _check_and_sort_partitions(partitions):
assert len(partitions) == k_partitions, f"{len(partitions)} != {k_partitions}"
seen_idx = set()
sorted_partitions = [None] * k_partitions
for i, partition in enumerate(partitions):
assert len(partition) > 0, f"the {i}-th partition is empty"
for idx in partition:
seen_idx.add(idx)
sorted_partitions[i] = sorted(partition)
assert seen_idx == set(range(len(seqlen_list)))
return sorted_partitions
partitions = karmarkar_karp(seqlen_list=seqlen_list, k_partitions=k_partitions, equal_size=equal_size)
return _check_and_sort_partitions(partitions)
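# Illustrative example: balancing four sequences of lengths [5, 4, 3, 2] into
# k_partitions=2 groups pairs the longest with the shortest, producing index
# partitions {0, 3} and {1, 2}, whose length sums are both 7:
#     get_seqlen_balanced_partitions([5, 4, 3, 2], k_partitions=2, equal_size=False)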
def log_seqlen_unbalance(seqlen_list: List[int], partitions: List[List[int]], prefix):
# add some metrics of seqlen sum on dp ranks
k_partition = len(partitions)
# assert len(seqlen_list) % k_partition == 0
batch_size = len(seqlen_list) // k_partition
min_sum_seqlen = None
max_sum_seqlen = None
total_sum_seqlen = 0
for offset in range(0, len(seqlen_list), batch_size):
cur_sum_seqlen = sum(seqlen_list[offset:offset + batch_size])
if min_sum_seqlen is None or cur_sum_seqlen < min_sum_seqlen:
min_sum_seqlen = cur_sum_seqlen
if max_sum_seqlen is None or cur_sum_seqlen > max_sum_seqlen:
max_sum_seqlen = cur_sum_seqlen
total_sum_seqlen += cur_sum_seqlen
balanced_sum_seqlen_list = []
for partition in partitions:
cur_sum_seqlen_balanced = sum([seqlen_list[i] for i in partition])
balanced_sum_seqlen_list.append(cur_sum_seqlen_balanced)
# print("balanced_sum_seqlen_list: ", balanced_sum_seqlen_list)
min_sum_seqlen_balanced = min(balanced_sum_seqlen_list)
max_sum_seqlen_balanced = max(balanced_sum_seqlen_list)
return {
f'{prefix}/min': min_sum_seqlen,
f'{prefix}/max': max_sum_seqlen,
f'{prefix}/minmax_diff': max_sum_seqlen - min_sum_seqlen,
f'{prefix}/balanced_min': min_sum_seqlen_balanced,
f'{prefix}/balanced_max': max_sum_seqlen_balanced,
f'{prefix}/mean': total_sum_seqlen / len(partitions)
}
def ceildiv(a, b):
    # ceiling division via negated floor division: -(a // -b) == ceil(a / b)
    return -(a // -b)
def rearrange_micro_batches(batch: TensorDict, max_token_len, dp_group=None):
"""Split the batch into a list of micro_batches, where the max_token_len is smaller than max_token_len
and the number of valid tokens in each micro batch is well balanced.
"""
# this is per local micro_bsz
max_seq_len = batch['attention_mask'].shape[-1]
assert max_token_len >= max_seq_len, \
f'max_token_len must be greater than the sequence length. Got {max_token_len=} and {max_seq_len=}'
seq_len_effective: torch.Tensor = batch['attention_mask'].sum(dim=1)
total_seqlen = seq_len_effective.sum().item()
num_micro_batches = ceildiv(total_seqlen, max_token_len)
if dist.is_initialized():
num_micro_batches = torch.tensor([num_micro_batches], device='cuda')
dist.all_reduce(num_micro_batches, op=dist.ReduceOp.MAX, group=dp_group)
num_micro_batches = num_micro_batches.cpu().item()
seq_len_effective = seq_len_effective.tolist()
assert num_micro_batches <= len(seq_len_effective)
micro_bsz_idx = get_seqlen_balanced_partitions(seq_len_effective, num_micro_batches, equal_size=False)
micro_batches = []
for partition in micro_bsz_idx:
curr_micro_batch = []
for idx in partition:
curr_micro_batch.append(batch[idx:idx + 1])
curr_micro_batch = torch.cat(curr_micro_batch)
micro_batches.append(curr_micro_batch)
return micro_batches, micro_bsz_idx
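# Illustrative usage sketch, assuming a TensorDict batch that carries the
# 'attention_mask' entry required above:
#
#     batch = TensorDict(
#         {'attention_mask': torch.ones(8, 512, dtype=torch.long)},
#         batch_size=[8],
#     )
#     micro_batches, idx = rearrange_micro_batches(batch, max_token_len=2048)
#     # 8 * 512 = 4096 valid tokens -> 2 micro batches of ~2048 tokens each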
def get_reverse_idx(idx_map):
reverse_idx_map = copy.deepcopy(idx_map)
for i, idx in enumerate(idx_map):
reverse_idx_map[idx] = i
return reverse_idx_map
================================================
FILE: verl/utils/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for tokenization."""
import warnings
__all__ = ['hf_tokenizer', 'hf_processor']
def set_pad_token_id(tokenizer):
"""Set pad_token_id to eos_token_id if it is None.
Args:
tokenizer (transformers.PreTrainedTokenizer): The tokenizer to be set.
"""
if tokenizer.pad_token_id is None:
tokenizer.pad_token_id = tokenizer.eos_token_id
warnings.warn(f'tokenizer.pad_token_id is None. Now set to {tokenizer.eos_token_id}')
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
warnings.warn(f'tokenizer.pad_token is None. Now set to {tokenizer.eos_token}')
def hf_tokenizer(name_or_path, correct_pad_token=True, correct_gemma2=True, **kwargs):
"""Create a huggingface pretrained tokenizer which correctness handles eos and pad tokens.
Args:
name (str): The name of the tokenizer.
correct_pad_token (bool): Whether to correct the pad token id.
correct_gemma2 (bool): Whether to correct the gemma2 tokenizer.
Returns:
transformers.PreTrainedTokenizer: The pretrained tokenizer.
"""
from transformers import AutoTokenizer
if correct_gemma2 and isinstance(name_or_path, str) and 'gemma-2-2b-it' in name_or_path:
        # the EOS token in gemma2 is ambiguous, which may worsen RL performance.
        # https://huggingface.co/google/gemma-2-2b-it/commit/17a01657f5c87135bcdd0ec7abb4b2dece04408a
        warnings.warn('Found gemma-2-2b-it tokenizer. Set eos_token and eos_token_id to <end_of_turn> and 107.')
        kwargs['eos_token'] = '<end_of_turn>'
kwargs['eos_token_id'] = 107
tokenizer = AutoTokenizer.from_pretrained(name_or_path, **kwargs)
if correct_pad_token:
set_pad_token_id(tokenizer)
return tokenizer
def hf_processor(name_or_path, **kwargs):
"""Create a huggingface processor to process multimodal data.
Args:
name_or_path (str): The name of the processor.
Returns:
transformers.ProcessorMixin: The pretrained processor.
"""
from transformers import AutoProcessor
try:
processor = AutoProcessor.from_pretrained(name_or_path, **kwargs)
except Exception:
processor = None
    # If AutoProcessor falls back to a plain tokenizer, return None instead; see:
# https://github.com/huggingface/transformers/blob/v4.49.0/src/transformers/models/auto/processing_auto.py#L344
if processor is not None and "Processor" not in processor.__class__.__name__:
processor = None
return processor
================================================
FILE: verl/utils/torch_dtypes.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from Cruise.
"""
import torch
from enum import Enum
from typing import Union
HALF_LIST = [16, "16", "fp16", "float16", torch.float16]
FLOAT_LIST = [32, "32", "fp32", "float32", torch.float32]
BFLOAT_LIST = ["bf16", "bfloat16", torch.bfloat16]
class PrecisionType(str, Enum):
    """Type of precision used.
    >>> PrecisionType.HALF == "16"
    True
    >>> PrecisionType.HALF in ("16", "32")
    True
    """
HALF = "16"
FLOAT = "32"
FULL = "64"
BFLOAT = "bf16"
MIXED = "mixed"
@staticmethod
def supported_type(precision: Union[str, int]) -> bool:
return any(x == precision for x in PrecisionType)
@staticmethod
def supported_types() -> list[str]:
return [x.value for x in PrecisionType]
@staticmethod
def is_fp16(precision):
return precision in HALF_LIST
@staticmethod
def is_fp32(precision):
return precision in FLOAT_LIST
@staticmethod
def is_bf16(precision):
return precision in BFLOAT_LIST
@staticmethod
def to_dtype(precision):
if precision in HALF_LIST:
return torch.float16
elif precision in FLOAT_LIST:
return torch.float32
elif precision in BFLOAT_LIST:
return torch.bfloat16
else:
raise RuntimeError(f"unexpected precision: {precision}")
@staticmethod
def to_str(precision):
if precision == torch.float16:
return 'fp16'
elif precision == torch.float32:
return 'fp32'
elif precision == torch.bfloat16:
return 'bf16'
else:
raise RuntimeError(f"unexpected precision: {precision}")
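# Illustrative examples:
#     PrecisionType.to_dtype("bf16")        # -> torch.bfloat16
#     PrecisionType.to_str(torch.float16)   # -> 'fp16'
#     PrecisionType.is_fp32(torch.float32)  # -> True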
================================================
FILE: verl/utils/torch_functional.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contain small torch utilities
"""
from typing import Dict, Union, List, Optional
import torch
import torch.distributed
import torch.nn.functional as F
from tensordict import TensorDict
from torch import nn
try:
    from flash_attn.ops.triton.cross_entropy import cross_entropy_loss
    FLASH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = True
except ImportError:
    FLASH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE = False
def gather_from_labels(data, label):
"""Gather the label from data. The value in label should be [0, vocab_size)
Args:
data: (..., vocab_size)
label (torch.IntTensor) : (...,)
Returns:
"""
output = torch.gather(data, -1, label.unsqueeze(-1)).squeeze(-1)
return output
def logprobs_from_logits(logits, labels):
"""
See: https://github.com/pytorch/pytorch/issues/563#issuecomment-330103591
"""
    if FLASH_ATTN_CROSS_ENTROPY_LOSS_AVAILABLE:
batch_dim = logits.shape[:-1]
last_dim = logits.shape[-1]
logits = logits.reshape(-1, last_dim)
labels = labels.reshape(-1)
output = logprobs_from_logits_flash_attn(logits, labels)
output = output.view(*batch_dim)
else:
output = logprobs_from_logits_v2(logits, labels)
return output
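# Shape sketch: for logits of shape [batch, seq_len, vocab] and integer labels of
# shape [batch, seq_len], the result has shape [batch, seq_len], holding
# log p(label_t) at each position:
#     logits = torch.randn(2, 5, 100)
#     labels = torch.randint(0, 100, (2, 5))
#     logprobs_from_logits(logits, labels).shape  # -> torch.Size([2, 5])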
def logprobs_from_logits_flash_attn(logits, labels):
output = cross_entropy_loss(logits, labels)
assert isinstance(
output, tuple), "please make sure flash-attn>=2.4.3 where cross_entropy_loss returns Tuple[losses, z_losses]."
return -output[0]
def logprobs_from_logits_naive(logits, labels):
logp = F.log_softmax(logits, dim=-1)
logpy = gather_from_labels(logp, labels)
return logpy
def logprobs_from_logits_v2(logits: torch.FloatTensor, labels):
"""
A memory efficient implementation of logprobs_from_logits
"""
if logits.dtype in [torch.float32, torch.float64]:
logits_labels = torch.gather(logits, dim=-1, index=labels.unsqueeze(-1)).squeeze(-1)
# loop to reduce peak mem consumption
logsumexp_values = torch.stack([torch.logsumexp(l, dim=-1) for l in logits])
logprobs_labels = logits_labels - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
else:
        # the logsumexp approach is unstable with bfloat16; fall back to a slightly less efficient approach
logprobs_labels = []
for row_logits, row_labels in zip(logits, labels): # loop to reduce peak mem consumption
row_logprobs = F.log_softmax(row_logits, dim=-1)
row_logprobs_labels = row_logprobs.gather(dim=-1, index=row_labels.unsqueeze(-1)).squeeze(-1)
logprobs_labels.append(row_logprobs_labels)
logprobs_labels = torch.stack(logprobs_labels)
return logprobs_labels
def clip_by_value(x, tensor_min, tensor_max):
"""
    Tensor extension to torch.clamp
https://github.com/pytorch/pytorch/issues/2793#issuecomment-428784713
"""
clipped = torch.max(torch.min(x, tensor_max), tensor_min)
return clipped
def entropy_from_logits(logits: torch.Tensor):
"""Calculate entropy from logits."""
pd = torch.nn.functional.softmax(logits, dim=-1)
entropy = torch.logsumexp(logits, dim=-1) - torch.sum(pd * logits, dim=-1)
return entropy
def masked_sum(values, mask, axis=None):
    """Compute the sum of tensor values over unmasked entries."""
    return (values * mask).sum(axis=axis)
def masked_mean(values, mask, axis=None):
    """Compute the mean of tensor values over unmasked entries."""
    return (values * mask).sum(axis=axis) / mask.sum(axis=axis)
def masked_var(values, mask, unbiased=True):
"""Compute variance of tensor with masked values."""
mean = masked_mean(values, mask)
centered_values = values - mean
variance = masked_mean(centered_values**2, mask)
if unbiased:
mask_sum = mask.sum()
if mask_sum == 0:
raise ValueError("At least one element in the mask has to be 1.")
# note that if mask_sum == 1, then there is a division by zero issue
# to avoid it you just need to use a larger minibatch_size
if mask_sum == 1:
raise ValueError("The sum of the mask is one, which can cause a division by zero.")
bessel_correction = mask_sum / (mask_sum - 1)
variance = variance * bessel_correction
return variance
def masked_whiten(values, mask, shift_mean=True):
"""Whiten values with masked values."""
mean, var = masked_mean(values, mask), masked_var(values, mask)
whitened = (values - mean) * torch.rsqrt(var + 1e-8)
if not shift_mean:
whitened += mean
return whitened
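# Worked example: with values = [1., 2., 3.] and mask = [1., 1., 0.], masked_mean
# is (1 + 2) / 2 = 1.5; the masked-out 3 contributes to neither the numerator
# nor the denominator:
#     v = torch.tensor([1., 2., 3.])
#     m = torch.tensor([1., 1., 0.])
#     masked_mean(v, m)  # -> tensor(1.5000)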
def get_eos_mask(response_id: torch.Tensor, eos_token: Union[int, List[int]] = 2, dtype=torch.int64):
'''
end of sentence token can be int or list: 1 or [1, 2]
e.g. eos_token=1
response_id: [0, 0, 2, 42, 3, 5, 1, 0, 0]
eos_mask: [1, 1, 1, 1, 1, 1, 1, 0, 0]
'''
if isinstance(eos_token, int):
eos_token = [eos_token]
eos_mask = torch.zeros_like(response_id, dtype=torch.bool)
for token in eos_token:
eos_mask |= response_id.eq(token)
eos_mask = eos_mask.long()
eos_mask = (torch.cumsum(eos_mask, dim=1) - eos_mask).bool()
eos_mask = torch.logical_not(eos_mask).to(dtype)
return eos_mask
def compute_grad_norm(model: nn.Module):
    # NOTE: returns the squared L2 norm of all gradients (sum of squared entries).
    total_grad_square = 0
    for param in model.parameters():
        if param.grad is not None:
            total_grad_square += torch.sum(torch.square(param.grad.detach())).item()
    return total_grad_square
def broadcast_dict_tensor(tensors: Union[Dict[str, torch.Tensor], TensorDict], src, group):
"""
TODO: optimize this. Technically, we only need one broadcast
"""
for key in tensors.sorted_keys:
torch.distributed.broadcast(tensors[key], src=src, group=group, async_op=False)
def allgather_dict_tensors(tensors: Union[Dict[str, torch.Tensor], TensorDict], size, group, dim=0):
"""
TODO: optimize this.
- We can use async ops
- We can use only one allgather
Args:
tensors:
size:
group:
Returns:
"""
if isinstance(tensors, TensorDict):
is_tensor_dict = True
tensors_as_dict = tensors.to_dict()
else:
tensors_as_dict = tensors
is_tensor_dict = False
output = {}
sorted_keys = sorted(tensors_as_dict.keys())
for key in sorted_keys:
val = tensors_as_dict[key]
output[key] = [torch.empty_like(val) for _ in range(size)]
torch.distributed.all_gather(output[key], val, group=group, async_op=False)
output[key] = torch.cat(output[key], dim=dim)
if is_tensor_dict:
output = TensorDict(source=output, batch_size=tensors.batch_size[0] * size)
return output
def split_dict_tensor_into_batches(tensors: TensorDict, batch_size) -> List[TensorDict]:
assert tensors.batch_size[0] % batch_size == 0, \
f'input data batch size: {tensors.batch_size[0]}, split batch size: {batch_size}'
return tensors.split(batch_size)
def pad_2d_list_to_length(response, pad_token_id, max_length=None):
"""
pad a 2D list (e.g. responses, logprobs) to a 2D tensor.
"""
response_length = max(len(sub_list) for sub_list in response)
if max_length is not None and max_length > response_length:
target_length = max_length
else:
target_length = response_length
padded_response = [tuple(sub_list) + (pad_token_id,) * (target_length - len(sub_list)) for sub_list in response]
tensor = torch.tensor(padded_response)
return tensor
def pad_sequence_to_length(tensors, max_seq_len, pad_token_id, left_pad=False):
"""
pad a 2D tensors (e.g. responses, logprobs) in the last dim to max_seq_length.
input shape: [bs, seq_length]
output shape: [bs, max_seq_length]
(0, max_seq_len - tensors.shape[-1]) means right pad to max_seq_length and no left pad
"""
if tensors.shape[-1] >= max_seq_len:
return tensors
pad_tuple = (max_seq_len - tensors.shape[-1], 0) if left_pad else (0, max_seq_len - tensors.shape[-1])
return F.pad(tensors, pad_tuple, 'constant', pad_token_id)
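# Illustrative example: right padding a [1, 3] tensor to length 5 appends the pad
# token; left_pad=True would prepend it instead:
#     x = torch.tensor([[7, 8, 9]])
#     pad_sequence_to_length(x, max_seq_len=5, pad_token_id=0)
#     # -> tensor([[7, 8, 9, 0, 0]])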
from transformers import PreTrainedTokenizer
def tokenize_and_postprocess_data(prompt: str,
tokenizer: PreTrainedTokenizer,
max_length: int,
pad_token_id: int,
left_pad=True,
truncation='error'):
"""
    Tokenize the prompt, then pad the result to max_length or truncate it according to the truncation mode.
"""
assert truncation in ['left', 'right', 'error']
input_data = tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
input_ids = input_data['input_ids']
attention_mask = input_data['attention_mask']
assert input_ids.ndim == 2
sequence_length = input_ids.shape[-1]
if sequence_length < max_length:
input_ids = pad_sequence_to_length(input_ids,
max_seq_len=max_length,
pad_token_id=pad_token_id,
left_pad=left_pad)
attention_mask = pad_sequence_to_length(attention_mask,
max_seq_len=max_length,
pad_token_id=0,
left_pad=left_pad)
elif sequence_length > max_length:
if truncation == 'left':
# actually, left truncation may not be reasonable
input_ids = input_ids[:, -max_length:]
attention_mask = attention_mask[:, -max_length:]
elif truncation == 'right':
input_ids = input_ids[:, :max_length]
attention_mask = attention_mask[:, :max_length]
elif truncation == 'error':
raise NotImplementedError(f'{sequence_length=} is larger than {max_length=}')
else:
raise NotImplementedError(f'Unknown truncation method {truncation}')
return input_ids, attention_mask
def remove_pad_token(input_ids: torch.Tensor, attention_mask: torch.Tensor):
""" Remove the pad token.
Args:
input_ids shape: [bs, seq_length]
attention_mask shape: [bs, seq_length]
Returns:
no_padding_batch(List[List[int]]): contains the rmpad token ids per query.
"""
no_padding_batch = []
for ids, mask in zip(input_ids, attention_mask):
no_padding_batch.append((ids[len(ids) - mask.sum():]).cpu().numpy().tolist())
return no_padding_batch
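# Illustrative example (values assumed). Note this helper assumes left padding: it
# keeps the last `mask.sum()` tokens of each row.
#   >>> ids = torch.tensor([[0, 0, 7, 8]])
#   >>> mask = torch.tensor([[0, 0, 1, 1]])
#   >>> remove_pad_token(ids, mask)
#   [[7, 8]]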
def log_probs_from_logits_response(input_ids, logits, response_length):
"""Compute the response log_probs from full logits. Note that logits = model(input_ids)
Args:
input_ids: [batch_size, seqlen]
logits: [batch_size, seqlen, vocab_size]
Returns:
response_log_prob:
"""
response_logits = logits[:, -response_length - 1:-1]
response = input_ids[:, -response_length:]
response_log_prob = logprobs_from_logits(logits=response_logits, labels=response)
return response_log_prob
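# Why the slice is shifted by one (illustrative, seqlen=5, response_length=2):
# logits[:, t] scores the token at position t + 1, so the logits that score the
# response tokens at positions 3 and 4 live at positions 2 and 3, i.e.
# logits[:, -3:-1] is paired with input_ids[:, -2:].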
def log_probs_from_logits_response_rmpad(input_ids, attention_mask, logits_rmpad, response_length):
"""Compute the log_probs from logits with rmpad logits and pad input. Note that
logits_rmpad = model(input_ids_rmpad). For each sentences, there is a shift between
logits and input_ids.
The reason for this function to is to compute logprobs_from_logits in rmpad mode because it is memory-intensive
for large vocab_size
Args:
input_ids: [batch_size, seqlen]
attention_mask: [batch_size, seqlen]
logits_rmpad: [total_nnz, vocab_size]
response_length: int
"""
from flash_attn.bert_padding import pad_input, unpad_input
batch_size, seqlen = input_ids.shape
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1), attention_mask=attention_mask)
input_ids_rmpad = input_ids_rmpad.squeeze(-1)
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled) # (total_nnz,)
full_output = pad_input(hidden_states=full_log_probs_rmpad.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
output = full_output.squeeze(-1)[:, -response_length - 1:-1] # [batch_size, response_length]
return output
def log_probs_from_logits_all_rmpad(input_ids_rmpad, logits_rmpad, indices, batch_size, seqlen, response_length):
"""Compute the log_probs from logits with rmpad input_ids and logits. Note that
logits_rmpad = model(input_ids_rmpad). For each sentences, there is a shift between
logits and input_ids.
The reason for this function to is to compute logprobs_from_logits in rmpad mode because it is memory-intensive
for large vocab_size
Args:
input_ids_rmpad: [1, total_nnz]
logits_rmpad: [total_nnz, vocab_size]
indices: [total_nnz]
batch_size: int
seqlen: int
response_length: int
"""
from flash_attn.bert_padding import pad_input
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # transpose back to [total_nnz, 1]
input_ids_rmpad = input_ids_rmpad.squeeze(-1)
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=0)
full_log_probs_rmpad = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled) # (total_nnz,)
full_output = pad_input(hidden_states=full_log_probs_rmpad.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
output = full_output.squeeze(-1)[:, -response_length - 1:-1] # [batch_size, response_length]
return output
from transformers.generation.logits_process import (TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper)
def post_process_logits(input_ids, logits, temperature, top_k, top_p):
if temperature != 1.:
logits = logits.div_(temperature) # inplace operation to avoid OOM
# TODO: add them back
# if top_k is not None and top_k > 0:
# logits = TopKLogitsWarper(top_k=top_k)(input_ids, logits)
# if top_p is not None and top_p < 1.0 and top_p > 0.0:
# logits = TopPLogitsWarper(top_p=top_p)(input_ids, logits)
return logits
"""
Optimizer related
"""
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
def get_cosine_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
num_training_steps: int,
min_lr_ratio: float = 0.0,
num_cycles: float = 0.5,
last_epoch: int = -1,
):
"""
    Create a schedule with a learning rate that decreases following the values of the cosine function from the
    initial lr set in the optimizer down to `min_lr_ratio` times that lr, after a warmup period during which it
    increases linearly from 0 to the initial lr.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
min_lr_ratio (:obj:`float`, `optional`, defaults to 0.0):
The minimum lr ratio w.r.t the maximum.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value
            following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
assert min_lr_ratio >= 0 and min_lr_ratio <= 1.
coef = (1 - min_lr_ratio) * 0.5
intercept = (1 + min_lr_ratio) * 0.5
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
x = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
return max(0.0, x * coef + intercept)
return LambdaLR(optimizer, lr_lambda, last_epoch)
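# Worked example (values assumed): with min_lr_ratio=0.1 and num_cycles=0.5 the
# multiplier decays from 1.0 at the end of warmup down to min_lr_ratio at the end:
#   coef = (1 - 0.1) * 0.5 = 0.45, intercept = (1 + 0.1) * 0.5 = 0.55
#   at progress = 1: cos(pi) = -1, so lr_lambda = -1 * 0.45 + 0.55 = 0.1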
def get_constant_schedule_with_warmup(
optimizer: Optimizer,
num_warmup_steps: int,
last_epoch: int = -1,
):
def lr_lambda(current_step):
return min(1, float(current_step) / float(max(1, num_warmup_steps)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
def prepare_decoder_attention_mask(attention_mask, input_shape, inputs_embeds):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape,
inputs_embeds.dtype,
device=inputs_embeds.device,
)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype,
tgt_len=input_shape[-1]).to(inputs_embeds.device)
combined_attention_mask = (expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask +
combined_attention_mask)
return combined_attention_mask
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len)
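# Illustrative example (tgt_len=3): each position may attend only to itself and the
# past, so with m = torch.finfo(dtype).min the mask (per batch/head) is:
#   [[0, m, m],
#    [0, 0, m],
#    [0, 0, 0]]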
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
def get_unpad_data(attention_mask):
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
return (
indices,
cu_seqlens,
max_seqlen_in_batch,
)
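# Illustrative example (values assumed):
#   >>> mask = torch.tensor([[1, 1, 0], [1, 1, 1]])
#   >>> indices, cu_seqlens, max_seqlen = get_unpad_data(mask)
#   indices    == tensor([0, 1, 3, 4, 5])  # flat positions of the valid tokens
#   cu_seqlens == tensor([0, 2, 5])        # cumulative sequence lengths
#   max_seqlen == 3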
================================================
FILE: verl/utils/tracking.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A unified tracking interface that supports logging data to different backend
"""
import dataclasses
from enum import Enum
from functools import partial
from pathlib import Path
from typing import List, Union, Dict, Any
class Tracking(object):
supported_backend = ["wandb", "mlflow", "swanlab", "vemlp_wandb", "tensorboard", "console"]
def __init__(self, project_name, experiment_name, default_backend: Union[str, List[str]] = 'console', config=None):
if isinstance(default_backend, str):
default_backend = [default_backend]
for backend in default_backend:
if backend == 'tracking':
import warnings
warnings.warn("`tracking` logger is deprecated. use `wandb` instead.", DeprecationWarning)
else:
assert backend in self.supported_backend, f'{backend} is not supported'
self.logger = {}
if 'tracking' in default_backend or 'wandb' in default_backend:
import wandb
wandb.init(project=project_name, name=experiment_name, config=config)
self.logger['wandb'] = wandb
if 'mlflow' in default_backend:
import mlflow
mlflow.start_run(run_name=experiment_name)
mlflow.log_params(_compute_mlflow_params_from_objects(config))
self.logger['mlflow'] = _MlflowLoggingAdapter()
if "swanlab" in default_backend:
import swanlab
import os
SWANLAB_API_KEY = os.environ.get("SWANLAB_API_KEY", None)
SWANLAB_LOG_DIR = os.environ.get("SWANLAB_LOG_DIR", "swanlog")
SWANLAB_MODE = os.environ.get("SWANLAB_MODE", "cloud")
if SWANLAB_API_KEY:
swanlab.login(SWANLAB_API_KEY) # NOTE: previous login information will be overwritten
swanlab.init(project=project_name,
experiment_name=experiment_name,
config=config,
logdir=SWANLAB_LOG_DIR,
mode=SWANLAB_MODE)
self.logger["swanlab"] = swanlab
if 'vemlp_wandb' in default_backend:
import os
import volcengine_ml_platform
from volcengine_ml_platform import wandb as vemlp_wandb
volcengine_ml_platform.init(
ak=os.environ["VOLC_ACCESS_KEY_ID"],
sk=os.environ["VOLC_SECRET_ACCESS_KEY"],
region=os.environ["MLP_TRACKING_REGION"],
)
vemlp_wandb.init(
project=project_name,
name=experiment_name,
config=config,
sync_tensorboard=True,
)
self.logger['vemlp_wandb'] = vemlp_wandb
if 'tensorboard' in default_backend:
self.logger['tensorboard'] = _TensorboardAdapter()
if 'console' in default_backend:
from verl.utils.logger.aggregate_logger import LocalLogger
self.console_logger = LocalLogger(print_to_console=True)
self.logger['console'] = self.console_logger
def log(self, data, step, backend=None):
for default_backend, logger_instance in self.logger.items():
if backend is None or default_backend in backend:
logger_instance.log(data=data, step=step)
def __del__(self):
if 'wandb' in self.logger:
self.logger['wandb'].finish(exit_code=0)
if 'swanlab' in self.logger:
self.logger['swanlab'].finish()
if 'vemlp_wandb' in self.logger:
self.logger['vemlp_wandb'].finish(exit_code=0)
if 'tensorboard' in self.logger:
self.logger['tensorboard'].finish()
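# Illustrative usage sketch (project/metric names are assumed):
#   >>> tracker = Tracking(project_name='demo', experiment_name='run-0',
#   ...                    default_backend='console', config={'lr': 1e-5})
#   >>> tracker.log(data={'train/loss': 0.42}, step=1)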
class _TensorboardAdapter:
def __init__(self):
from torch.utils.tensorboard import SummaryWriter
import os
tensorboard_dir = os.environ.get("TENSORBOARD_DIR", "tensorboard_log")
os.makedirs(tensorboard_dir, exist_ok=True)
print(f"Saving tensorboard log to {tensorboard_dir}.")
self.writer = SummaryWriter(tensorboard_dir)
def log(self, data, step):
for key in data:
self.writer.add_scalar(key, data[key], step)
def finish(self):
self.writer.close()
class _MlflowLoggingAdapter:
def log(self, data, step):
import mlflow
mlflow.log_metrics(metrics=data, step=step)
def _compute_mlflow_params_from_objects(params) -> Dict[str, Any]:
if params is None:
return {}
return _flatten_dict(_transform_params_to_json_serializable(params, convert_list_to_dict=True), sep='/')
def _transform_params_to_json_serializable(x, convert_list_to_dict: bool):
_transform = partial(_transform_params_to_json_serializable, convert_list_to_dict=convert_list_to_dict)
if dataclasses.is_dataclass(x):
return _transform(dataclasses.asdict(x))
if isinstance(x, dict):
return {k: _transform(v) for k, v in x.items()}
if isinstance(x, list):
if convert_list_to_dict:
return {'list_len': len(x)} | {f'{i}': _transform(v) for i, v in enumerate(x)}
else:
return [_transform(v) for v in x]
if isinstance(x, Path):
return str(x)
if isinstance(x, Enum):
return x.value
return x
def _flatten_dict(raw: Dict[str, Any], *, sep: str) -> Dict[str, Any]:
import pandas as pd
ans = pd.json_normalize(raw, sep=sep).to_dict(orient='records')[0]
assert isinstance(ans, dict)
return ans
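# Illustrative example (config values assumed): nested configs are flattened into
# '/'-joined keys, and lists are converted to index-keyed dicts first.
#   >>> _compute_mlflow_params_from_objects({'trainer': {'lr': 1e-5, 'ckpts': ['a']}})
#   {'trainer/lr': 1e-05, 'trainer/ckpts/list_len': 1, 'trainer/ckpts/0': 'a'}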
================================================
FILE: verl/utils/ulysses.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for DeepSpeed Ulysses Sequence Parallelism.
DeepSpeed Ulysses Paper: https://arxiv.org/abs/2309.14509
Inspired by: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/sequence/layer.py
"""
from typing import Any, Optional, List, Tuple
import torch
from torch import Tensor
import torch.distributed as dist
from torch.distributed import ProcessGroup
_ULYSSES_SEQUENCE_PARALLEL_GROUP = None
def set_ulysses_sequence_parallel_group(group: dist.ProcessGroup):
"""
Set ulysses sequence parallel process group.
"""
global _ULYSSES_SEQUENCE_PARALLEL_GROUP
_ULYSSES_SEQUENCE_PARALLEL_GROUP = group
def get_ulysses_sequence_parallel_group() -> Optional[dist.ProcessGroup]:
"""
Get ulysses sequence parallel process group.
"""
global _ULYSSES_SEQUENCE_PARALLEL_GROUP
return _ULYSSES_SEQUENCE_PARALLEL_GROUP
def get_ulysses_sequence_parallel_world_size(group: ProcessGroup = None) -> int:
"""
Get ulysses sequence parallel world size.
"""
group = get_ulysses_sequence_parallel_group() if group is None else group
return dist.get_world_size(group) if group else 1
def get_ulysses_sequence_parallel_rank(group: ProcessGroup = None) -> int:
"""
Get ulysses sequence parallel rank.
"""
group = get_ulysses_sequence_parallel_group() if group is None else group
return dist.get_rank(group) if group else 0
def gather_seq_scatter_heads(
x: Tensor,
seq_dim: int,
head_dim: int,
unpadded_dim_size: int = 0,
group: ProcessGroup = None,
) -> Tensor:
"""
A func to sync embedding input with alltoall in sequence parallel
gather sequence dimension and scatter head dim:
e.g. seq_dim: 1, head_dim: 2
[bsz, seq/n, h, ...] -> [bsz, seq, h/n, ...]
"""
group = get_ulysses_sequence_parallel_group() if group is None else group
if not group:
return x
sp_world = get_ulysses_sequence_parallel_world_size(group)
x = SeqAllToAll.apply(group, x, head_dim, seq_dim)
if unpadded_dim_size and unpadded_dim_size % sp_world != 0:
padding_size = x.size(seq_dim) - unpadded_dim_size
x = _unpad_tensor(x, seq_dim, padding_size)
return x
def gather_heads_scatter_seq(x: Tensor, head_dim: int, seq_dim: int, group: ProcessGroup = None) -> Tensor:
"""
A func to sync attention result with alltoall in sequence parallel
gather head dimension and scatter seq dim:
e.g. seq_dim: 1, head_dim: 2
[bsz, seq, h/n, ...] -> [bsz, seq/n, h, ...]
"""
group = get_ulysses_sequence_parallel_group() if group is None else group
if not group:
return x
dim_size = x.size(seq_dim)
sp_world = get_ulysses_sequence_parallel_world_size(group)
if dim_size % sp_world != 0:
padding_size = sp_world - (dim_size % sp_world)
x = _pad_tensor(x, seq_dim, padding_size)
return SeqAllToAll.apply(group, x, seq_dim, head_dim, False)
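# Illustrative shape example (bsz=2, seq=8, h=16, sp world size n=2; values assumed):
#   gather_seq_scatter_heads: each rank turns its [2, 4, 16] shard into [2, 8, 8]
#   gather_heads_scatter_seq: the inverse, turning [2, 8, 8] back into [2, 4, 16]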
def _pad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor:
shape = list(x.shape)
shape[dim] = padding_size
pad = torch.zeros(shape, dtype=x.dtype, device=x.device)
return torch.cat([x, pad], dim=dim)
def _unpad_tensor(x: Tensor, dim: int, padding_size: int) -> Tensor:
slc = [slice(None)] * len(x.shape)
slc[dim] = slice(0, -padding_size)
return x[slc]
def slice_input_tensor(x: Tensor, dim: int, padding: bool = True, group: ProcessGroup = None) -> Tensor:
group = get_ulysses_sequence_parallel_group() if group is None else group
sp_world_size = dist.get_world_size(group)
    sp_rank = get_ulysses_sequence_parallel_rank(group)  # respect a caller-supplied group
dim_size = x.size(dim)
# pad before slice
if padding and dim_size % sp_world_size:
padding_size = sp_world_size - (dim_size % sp_world_size)
x = _pad_tensor(x, dim, padding_size)
# slice the input tensor
parts = x.size(dim) // sp_world_size
slc = [slice(None)] * len(x.shape)
slc[dim] = slice(sp_rank * parts, (sp_rank + 1) * parts)
return x[slc].contiguous()
def all_to_all_tensor(
local_input: Tensor,
scatter_dim: int,
gather_dim: int,
group: Optional[dist.ProcessGroup] = None,
async_op: bool = False,
):
group = get_ulysses_sequence_parallel_group() if group is None else group
seq_world_size = dist.get_world_size(group)
input_list = [t.contiguous() for t in torch.tensor_split(local_input, seq_world_size, scatter_dim)]
output_list = [torch.empty_like(input_list[0]) for _ in range(seq_world_size)]
comm = dist.all_to_all(output_list, input_list, group=group, async_op=async_op)
if async_op:
def wait():
comm.wait()
return torch.cat(output_list, dim=gather_dim).contiguous()
return wait
return torch.cat(output_list, dim=gather_dim).contiguous()
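# Illustrative semantics sketch (2 ranks, scatter_dim == gather_dim == 0; values
# assumed): each rank splits its input into one chunk per peer and exchanges them.
#   rank 0 input chunks [x0, x1], rank 1 input chunks [y0, y1]
#   after all_to_all: rank 0 holds cat([x0, y0]), rank 1 holds cat([x1, y1])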
def all_gather_tensor(local_tensor: Tensor, group: Optional[dist.ProcessGroup] = None, async_op: bool = False):
group = get_ulysses_sequence_parallel_group() if group is None else group
sp_world_size = dist.get_world_size(group=group)
output_shape = list(local_tensor.shape)
output_shape[0] = output_shape[0] * sp_world_size
output = torch.empty(output_shape, dtype=local_tensor.dtype, device=local_tensor.device)
dist.all_gather_into_tensor(output, local_tensor, group=group, async_op=async_op)
return output
class SeqAllToAll(torch.autograd.Function):
@staticmethod
def forward(
ctx: Any,
group: dist.ProcessGroup,
local_input: Tensor,
scatter_dim: int,
gather_dim: int,
async_op: bool = False,
) -> Tensor:
ctx.group = group
ctx.scatter_dim = scatter_dim
ctx.gather_dim = gather_dim
ctx.async_op = async_op
return all_to_all_tensor(local_input, scatter_dim, gather_dim, group, async_op)
@staticmethod
    def backward(ctx: Any, *grad_output: Tensor) -> Tuple[None, Tensor, None, None, None]:
        if ctx.async_op:
            input_t = torch.cat(grad_output[1:], dim=ctx.gather_dim).contiguous()
        else:
            input_t = grad_output[0]
        # return one gradient per forward input: (group, local_input, scatter_dim, gather_dim, async_op)
        return (
            None,
            all_to_all_tensor(input_t, ctx.gather_dim, ctx.scatter_dim, ctx.group, False),
            None,
            None,
            None,
        )
class Gather(torch.autograd.Function):
@staticmethod
def forward(ctx: Any,
group: dist.ProcessGroup,
local_tensor: Tensor,
gather_dim: int,
grad_scaler: bool = True,
async_op=False) -> Tensor:
ctx.group = group
ctx.gather_dim = gather_dim
ctx.grad_scaler = grad_scaler
ctx.async_op = async_op
sp_world_size = dist.get_world_size(group=group)
ctx.sp_world_size = sp_world_size
sp_rank = dist.get_rank(group=group)
ctx.sp_rank = sp_rank
local_shape = list(local_tensor.size())
split_size = local_shape[0]
part_size = local_shape[gather_dim] # store original size
ctx.part_size = part_size
output = all_gather_tensor(local_tensor, group, async_op)
return torch.cat(output.split(split_size, dim=0), dim=gather_dim)
@staticmethod
    def backward(ctx: Any, grad_output: Tensor) -> Any:
        if ctx.grad_scaler:
            grad_output = grad_output * ctx.sp_world_size
        # return one gradient per forward input: (group, local_tensor, gather_dim, grad_scaler, async_op)
        return (None, grad_output.split(ctx.part_size,
                                        dim=ctx.gather_dim)[ctx.sp_rank].contiguous(), None, None, None)
def gather_outpus_and_unpad(x: Tensor,
gather_dim: int,
unpad_dim: int = None,
padding_size: int = 0,
grad_scaler: bool = True,
group: Optional[dist.ProcessGroup] = None):
    group = get_ulysses_sequence_parallel_group() if group is None else group
    if group is None:
        return x
x = Gather.apply(group, x, gather_dim, grad_scaler)
if unpad_dim is not None:
assert isinstance(padding_size, int), 'padding size is not given or is not an integer'
if padding_size == 0:
return x
x = _unpad_tensor(x, unpad_dim, padding_size)
return x
def ulysses_pad_and_slice_inputs(input_ids_rmpad: torch.Tensor,
position_ids_rmpad: Optional[torch.Tensor] = None,
sp_size: int = 1):
"""
Pad and slice input_ids to be divisible by sp_size
Pad position_ids to be divisible by sp_size.
Note both input_ids_rmpad and position_ids_rmpad will be padded,
but only input_ids will be sliced.
    This is the pre-forward utility for ulysses sequence parallelism.
Args:
input_ids_rmpad: shape of [bsz, seqlen]
position_ids_rmpad: shape of [bsz, seqlen], where bsz must be 1
sp_size (int): ulysses sequence parallelism size
Returns:
torch.Tensor: padded and sliced input_ids
torch.Tensor: padded and sliced position_ids
int: pad size
"""
if position_ids_rmpad is not None:
assert position_ids_rmpad.size(0) == 1
assert input_ids_rmpad.size(1) == position_ids_rmpad.size(1)
if sp_size <= 1:
return input_ids_rmpad, position_ids_rmpad, 0
_, total_seq_len = input_ids_rmpad.shape
pad_size = (sp_size - total_seq_len % sp_size) % sp_size
if pad_size > 0:
input_ids_rmpad = torch.nn.functional.pad(input_ids_rmpad, (0, pad_size), value=0)
if position_ids_rmpad is not None:
pad_pos_ids = torch.arange(pad_size, device=position_ids_rmpad.device).unsqueeze(0)
position_ids_rmpad = torch.cat((position_ids_rmpad, pad_pos_ids), dim=-1)
# we don't need to slice position ids
input_ids_rmpad = slice_input_tensor(input_ids_rmpad, dim=1, padding=False)
return input_ids_rmpad, position_ids_rmpad, pad_size
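# Illustrative example (sp_size=4, total_seq_len=10; values assumed):
#   pad_size = (4 - 10 % 4) % 4 = 2, so input_ids is padded to 12 tokens and each
#   sp rank receives a 3-token slice, while position_ids is padded but kept whole.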
================================================
FILE: verl/version/version
================================================
0.2.0.dev
================================================
FILE: verl/workers/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: verl/workers/actor/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import BasePPOActor
from .dp_actor import DataParallelPPOActor
__all__ = ["BasePPOActor", "DataParallelPPOActor"]
================================================
FILE: verl/workers/actor/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base class for Actor
"""
from abc import ABC, abstractmethod
from typing import Iterable, Dict
from verl import DataProto
import torch
__all__ = ['BasePPOActor']
class BasePPOActor(ABC):
def __init__(self, config):
"""The base class for PPO actor
Args:
config (DictConfig): a config passed to the PPOActor. We expect the type to be
DictConfig (https://omegaconf.readthedocs.io/), but it can be any namedtuple in general.
"""
super().__init__()
self.config = config
@abstractmethod
def compute_log_prob(self, data: DataProto) -> torch.Tensor:
"""Compute logits given a batch of data.
Args:
data (DataProto): a batch of data represented by DataProto. It must contain key ```input_ids```,
```attention_mask``` and ```position_ids```.
Returns:
DataProto: a DataProto containing the key ```log_probs```
"""
pass
@abstractmethod
def update_policy(self, data: DataProto) -> Dict:
"""Update the policy with an iterator of DataProto
Args:
            data (DataProto): an iterator over the DataProto returned by
                ```make_minibatch_iterator```
        Returns:
            Dict: a dictionary that may contain anything. Typically, it contains the statistics collected while
            updating the model, such as ```loss```, ```grad_norm```, etc.
"""
pass
================================================
FILE: verl/workers/actor/dp_actor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Single Process Actor
"""
import itertools
from typing import Iterable, Tuple
import torch
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.workers.actor import BasePPOActor
from verl.utils.py_functional import append_to_dict
from verl.utils.torch_functional import logprobs_from_logits, masked_mean
from verl.utils.ulysses import ulysses_pad_and_slice_inputs, gather_outpus_and_unpad
from verl.utils.seqlen_balancing import rearrange_micro_batches, get_reverse_idx
import verl.utils.torch_functional as verl_F
from flash_attn.bert_padding import pad_input, unpad_input, rearrange, index_first_axis
__all__ = ['DataParallelPPOActor']
class DataParallelPPOActor(BasePPOActor):
def __init__(
self,
config,
actor_module: nn.Module,
actor_optimizer: torch.optim.Optimizer = None,
):
"""When optimizer is None, it is Reference Policy"""
super().__init__(config)
self.actor_module = actor_module
self.actor_optimizer = actor_optimizer
self.use_remove_padding = self.config.get('use_remove_padding', False)
print(f'Actor use_remove_padding={self.use_remove_padding}')
self.ulysses_sequence_parallel_size = self.config.ulysses_sequence_parallel_size
self.use_ulysses_sp = self.ulysses_sequence_parallel_size > 1
self.compute_entropy_from_logits = torch.compile(verl_F.entropy_from_logits, dynamic=True)
def _forward_micro_batch(self, micro_batch, temperature) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Returns:
entropy: # (bs, response_len)
log_probs: # (bs, response_len)
"""
response_length = micro_batch['responses'].size(-1)
multi_modal_inputs = {}
if 'multi_modal_inputs' in micro_batch:
for key in micro_batch['multi_modal_inputs'][0].keys():
multi_modal_inputs[key] = torch.cat([inputs[key] for inputs in micro_batch['multi_modal_inputs']],
dim=0)
with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
input_ids = micro_batch['input_ids']
batch_size, seqlen = input_ids.shape
attention_mask = micro_batch['attention_mask']
position_ids = micro_batch['position_ids']
if position_ids.dim() == 3: # qwen2vl mrope
position_ids = position_ids.transpose(0, 1) # (bsz, 3, seqlen) -> (3, bsz, seqlen)
if self.use_remove_padding:
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1),
attention_mask) # input_ids_rmpad (total_nnz, ...)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)
# unpad the position_ids to align the rotary
if position_ids.dim() == 3:
position_ids_rmpad = index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."),
indices).transpose(0, 1).unsqueeze(
1) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen)
else:
position_ids_rmpad = index_first_axis(rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."),
indices).transpose(0, 1)
# for compute the log_prob
input_ids_rmpad_rolled = torch.roll(input_ids_rmpad, shifts=-1, dims=1) # (1, total_nnz)
# pad and slice the inputs if sp > 1
if self.use_ulysses_sp:
input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(input_ids_rmpad, \
position_ids_rmpad, \
sp_size=self.ulysses_sequence_parallel_size)
input_ids_rmpad_rolled, _, _ = ulysses_pad_and_slice_inputs(input_ids_rmpad_rolled, None,
self.ulysses_sequence_parallel_size)
input_ids_rmpad_rolled = input_ids_rmpad_rolled.squeeze(0) # ((total_nnz / sp) + pad)
# only pass input_ids and position_ids to enable flash_attn_varlen
output = self.actor_module(input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids_rmpad,
**multi_modal_inputs,
                                           use_cache=False)  # prevent the model from thinking we are generating
logits_rmpad = output.logits.squeeze(0) # (total_nnz, vocab_size)
logits_rmpad.div_(temperature)
# compute entropy
entropy_rmpad = self.compute_entropy_from_logits(logits_rmpad) # ((total_nnz / sp) + pad)
# if use_sp: ((total_nnz / sp) + pad) ; if not use_sp: (batch, seqlen)
log_probs = logprobs_from_logits(logits=logits_rmpad, labels=input_ids_rmpad_rolled)
# gather log_prob if sp > 1
if self.use_ulysses_sp:
# gather and unpad for the ulysses sp
log_probs = gather_outpus_and_unpad(log_probs, gather_dim=0, unpad_dim=0, padding_size=pad_size)
entropy_rmpad = gather_outpus_and_unpad(entropy_rmpad,
gather_dim=0,
unpad_dim=0,
padding_size=pad_size)
# pad back to (bsz, seqlen)
full_entropy = pad_input(hidden_states=entropy_rmpad.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
full_log_probs = pad_input(hidden_states=log_probs.unsqueeze(-1),
indices=indices,
batch=batch_size,
seqlen=seqlen)
# only return response part:
entropy = full_entropy.squeeze(-1)[:, -response_length - 1:-1] # (bsz, response_length)
log_probs = full_log_probs.squeeze(-1)[:, -response_length - 1:-1] # (bsz, response_length)
else: # not using rmpad and no ulysses sp
output = self.actor_module(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
**multi_modal_inputs,
                                           use_cache=False)  # prevent the model from thinking we are generating
logits = output.logits
logits.div_(temperature)
logits = logits[:, -response_length - 1:-1, :] # (bsz, response_length, vocab_size)
log_probs = logprobs_from_logits(logits, micro_batch['responses'])
entropy = verl_F.entropy_from_logits(logits) # (bsz, response_length)
return entropy, log_probs
def _optimizer_step(self):
assert self.config.grad_clip is not None
if isinstance(self.actor_module, FSDP):
grad_norm = self.actor_module.clip_grad_norm_(max_norm=self.config.grad_clip)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.actor_module.parameters(), max_norm=self.config.grad_clip)
self.actor_optimizer.step()
return grad_norm
def compute_log_prob(self, data: DataProto) -> torch.Tensor:
"""Compute the log probability of the responses given input_ids, attention_mask and position_ids
Args:
data (DataProto): a DataProto containing keys
``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the
concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``.
``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.
``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.
``responses``: tensor of shape [batch_size, response_length]. torch.int64.
Returns:
torch.Tensor: the log_prob tensor
"""
# set to eval
self.actor_module.eval()
micro_batch_size = data.meta_info['micro_batch_size']
        temperature = data.meta_info['temperature']  # temperature must be in the data.meta_info to avoid silent error
use_dynamic_bsz = data.meta_info['use_dynamic_bsz']
select_keys = ['responses', 'input_ids', 'attention_mask', 'position_ids']
batch = data.select(batch_keys=select_keys).batch
has_multi_modal_inputs = 'multi_modal_inputs' in data.non_tensor_batch.keys()
if has_multi_modal_inputs:
num_micro_batches = data.batch.batch_size[0] // micro_batch_size
non_tensor_select_keys = ['multi_modal_inputs']
micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
elif use_dynamic_bsz:
# split using dynamic bsz
max_token_len = data.meta_info['max_token_len'] * self.ulysses_sequence_parallel_size
micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len)
else:
micro_batches = batch.split(micro_batch_size)
log_probs_lst = []
for micro_batch in micro_batches:
if isinstance(micro_batch, DataProto):
micro_batch = {**micro_batch.batch, **micro_batch.non_tensor_batch}
with torch.no_grad():
_, log_probs = self._forward_micro_batch(micro_batch, temperature=temperature)
log_probs_lst.append(log_probs)
log_probs = torch.concat(log_probs_lst, dim=0)
if use_dynamic_bsz:
indices = list(itertools.chain.from_iterable(indices))
assert len(indices) == log_probs.size(0), f"{len(indices)} vs. {log_probs.size()}"
revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
log_probs = log_probs[revert_indices]
return log_probs
def update_policy(self, data: DataProto):
# make sure we are in training mode
self.actor_module.train()
        temperature = data.meta_info['temperature']  # temperature must be in the data.meta_info to avoid silent error
select_keys = ['responses', 'input_ids', 'attention_mask', 'tool_output_masks', 'position_ids', 'old_log_probs', 'advantages']
if self.config.use_kl_loss:
select_keys.append('ref_log_prob')
batch = data.select(batch_keys=select_keys).batch
has_multi_modal_inputs = 'multi_modal_inputs' in data.non_tensor_batch.keys()
# Split to make minibatch iterator for updating the actor
# See PPO paper for details. https://arxiv.org/abs/1707.06347
if has_multi_modal_inputs:
num_mini_batches = data.batch.batch_size[0] // self.config.ppo_mini_batch_size
non_tensor_select_keys = ['multi_modal_inputs']
dataloader = data.select(select_keys, non_tensor_select_keys).chunk(num_mini_batches)
else:
dataloader = batch.split(self.config.ppo_mini_batch_size)
metrics = {}
for epoch in range(self.config.ppo_epochs):
for batch_idx, data in enumerate(dataloader):
# split batch into micro_batches
mini_batch = data
if has_multi_modal_inputs:
self.gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
num_micro_batches = mini_batch.batch.batch_size[0] // self.config.ppo_micro_batch_size_per_gpu
micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
elif self.config.use_dynamic_bsz:
max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
micro_batches, _ = rearrange_micro_batches(batch=mini_batch, max_token_len=max_token_len)
else:
self.gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
# split batch into micro_batches
micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)
self.actor_optimizer.zero_grad()
for data in micro_batches:
if isinstance(data, DataProto):
data = {**data.batch.cuda(), **data.non_tensor_batch}
else:
data = data.cuda() # actor device is cpu when using offload
responses = data['responses']
response_length = responses.size(1)
attention_mask = data['attention_mask']
                    tool_output_masks = data['tool_output_masks'] if 'tool_output_masks' in data else None
                    response_mask = attention_mask[:, -response_length:]
                    if tool_output_masks is not None:
                        # exclude tool-generated output tokens from the policy loss
                        response_mask = response_mask & tool_output_masks
old_log_prob = data['old_log_probs']
advantages = data['advantages']
clip_ratio = self.config.clip_ratio
entropy_coeff = self.config.entropy_coeff
# all return: (bsz, response_length)
entropy, log_prob = self._forward_micro_batch(micro_batch=data, temperature=temperature)
pg_loss, pg_clipfrac, ppo_kl = core_algos.compute_policy_loss(old_log_prob=old_log_prob,
log_prob=log_prob,
advantages=advantages,
eos_mask=response_mask,
cliprange=clip_ratio)
# compute entropy loss from entropy
entropy_loss = verl_F.masked_mean(entropy, response_mask)
# compute policy loss
policy_loss = pg_loss - entropy_loss * entropy_coeff
if self.config.use_kl_loss:
ref_log_prob = data['ref_log_prob']
# compute kl loss
kld = core_algos.kl_penalty(logprob=log_prob,
ref_logprob=ref_log_prob,
kl_penalty=self.config.kl_loss_type)
kl_loss = masked_mean(kld, response_mask)
policy_loss = policy_loss + kl_loss * self.config.kl_loss_coef
metrics['actor/kl_loss'] = kl_loss.detach().item()
metrics['actor/kl_coef'] = self.config.kl_loss_coef
if self.config.use_dynamic_bsz:
# relative to the dynamic bsz
loss = policy_loss * (len(data) / self.config.ppo_mini_batch_size)
else:
loss = policy_loss / self.gradient_accumulation
loss.backward()
data = {
'actor/entropy_loss': entropy_loss.detach().item(),
'actor/pg_loss': pg_loss.detach().item(),
'actor/pg_clipfrac': pg_clipfrac.detach().item(),
'actor/ppo_kl': ppo_kl.detach().item(),
}
append_to_dict(metrics, data)
grad_norm = self._optimizer_step()
data = {'actor/grad_norm': grad_norm.detach().item()}
append_to_dict(metrics, data)
self.actor_optimizer.zero_grad()
return metrics
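# Note on the loss scaling above (illustrative numbers): with ppo_mini_batch_size=8
# and ppo_micro_batch_size_per_gpu=2, gradient_accumulation = 4 and each micro-batch
# contributes policy_loss / 4, so the accumulated gradient matches one step over the
# full mini-batch; with use_dynamic_bsz, the factor len(data) / ppo_mini_batch_size
# plays the same role for variable-size micro-batches.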
================================================
FILE: verl/workers/actor/megatron_actor.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Megatron Actor.
In the megatron actor, the differences are:
1. We only construct minibatches here; micro-batch splitting happens inside ``forward_backward_batch``
Note that our model doesn't have to be `MegatronModule` because we don't share embedding in the last layer
"""
import importlib
from functools import partial
from packaging.version import Version
from typing import Iterable, Dict
import torch
from torch import nn
import torch.distributed
# from megatron import get_args
from megatron.core.optimizer import OptimizerConfig
from megatron.core import parallel_state as mpu
from megatron.core import ModelParallelConfig
from verl.utils.megatron_utils import get_model_config
from megatron.core.pipeline_parallel import get_forward_backward_func
from megatron.core.distributed import finalize_model_grads
# from megatron.core.optimizer import DistributedOptimizer
from megatron.core.optimizer import DistributedOptimizer
from omegaconf import OmegaConf
from verl.utils.megatron.tensor_parallel import vocab_parallel_compute_entropy_loss, vocab_parallel_log_probs_from_logits
from verl.utils.megatron.pipeline_parallel import (compute_transformers_input_shapes, make_batch_generator)
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.workers.actor import BasePPOActor
from verl.utils.py_functional import append_to_dict
from verl.utils.torch_functional import logprobs_from_logits, broadcast_dict_tensor, split_dict_tensor_into_batches
__all__ = ['MegatronPPOActor']
class MegatronPPOActor(BasePPOActor):
def __init__(self, config, model_config, megatron_config: ModelParallelConfig, actor_module: nn.ModuleList,
actor_optimizer: DistributedOptimizer, actor_optimizer_config: OptimizerConfig):
"""MeagtronPPOActor class. This class implements the simple PPO logics when the model is built with Megatron.
Args:
config (OmegaConf): the basic config that contains the hyper-parameters of PPO Actor. It must contain
``ppo_micro_batch_size_per_gpu``: micro batch size when updating ppo.
``ppo_mini_batch_size``: minibatch size when updating ppo using the batch data.
``ppo_epochs``: number of epochs to update the actor using the batch data.
``shuffle``: whether to shuffle the data after each ppo epoch.
``clip_ratio``: clip ratio of the ppo algorithm. See https://arxiv.org/abs/1707.06347.
``entropy_coeff``: entropy coefficient of the PPO loss. See https://arxiv.org/abs/1707.06347.
            model_config (OmegaConf): model configuration. It must contain ``model_config.vocab_size`` and
                ``model_config.hidden_size``
            megatron_config (OmegaConf): megatron configuration. It must contain
                ``sequence_parallel_enabled``: whether the sequence parallel is enabled.
``param_dtype``: the dtype of the parameters.
``virtual_pipeline_model_parallel_size``: virtual pipeline model parallel size. a.k.a number of chunks in each pp stage.
actor_module (nn.ModuleList): actor module is a ModuleList that contains a list of nn.Module in this pp stage.
each nn.Module in this rank holds a vpp module chunk. See https://arxiv.org/pdf/2104.04473.pdf for more details.
The actor module has some constraints to follow in order to use the updating logics implemented here
1. It must implement unpad_input before any computation and pad_input after all the computation. Remove padding is an
optimization that removes the padding tokens. See unpad_input and pad_input function in flash-attn
(https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py).
2. Each pp stage must return the hidden state with the same shape [total_nnz, 1, hidden_size],
where total_nnz is the number of valid tokens in this batch. If sequence parallel is enabled, the size
of the hidden state is [total_nnz // tp, 1, hidden_size].
actor_optimizer (DistributedOptimizer): currently, we only support DistributedOptimizer in Megatron. It implements
zero1 optimizer that shards the optimizer state across dp ranks.
>>> def megatron_actor_model_provider(pre_process, post_process):
>>> vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank()
>>> parallel_model = ParallelMistralForCausalLMRmPadPP(config=actor_model_config,
>>> megatron_config=megatron_config,
>>> pre_process=pre_process,
>>> post_process=post_process).cuda()
>>> return parallel_model
>>> from megatron.training import get_model
>>> from megatron.optimizer import get_megatron_optimizer
>>> actor_module = get_model(megatron_actor_model_provider, wrap_with_ddp=True)
>>> actor_module = nn.ModuleList(actor_module)
>>> actor_optimizer = get_megatron_optimizer(actor_module)
>>> actor = MegatronPPOActor(config=config,
>>> model_config=actor_model_config,
>>> megatron_config=megatron_config,
>>> actor_module=actor_module,
>>> actor_optimizer=actor_optimizer)
"""
super().__init__(config)
self._validate_config(config)
self.model_config = model_config
self.megatron_config = megatron_config
# self.megatron_args = get_args()
self.actor_module = actor_module
self.actor_optimizer: DistributedOptimizer = actor_optimizer
self.actor_optimizer_config = actor_optimizer_config
self.optimizer_step_args = OmegaConf.create({
'skip_grad': None,
'overlap_dp_param_comm': False,
'overlap_dp_grad_comm': False,
'gradient_accumulation_steps': 1,
'sequence_parallel': self.megatron_config.sequence_parallel,
'DDP_impl': 'local',
'layernorm_allreduce_bucket_threshold': 0,
'pipeline_model_parallel_split_rank': None,
'reduce_grads_use_alltoall': False
})
config = get_model_config(self.actor_module[0])
config.finalize_model_grads_func = finalize_model_grads
def _validate_config(self, config) -> None:
"""Validate config options not implemented for Megatron backend"""
assert config.get('ulysses_sequence_parallel_size', 1) == 1
def compute_log_prob(self, data: DataProto) -> torch.Tensor:
"""Compute the log probability of the responses given input_ids, attention_mask and position_ids
Args:
data (DataProto): a DataProto containing keys
``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64. Note that input_ids is the
concatenation of prompt and response. Note that ``sequence_length = prompt_length + response_length``.
``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64.
``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64.
``responses``: tensor of shape [batch_size, response_length]. torch.int64.
Returns:
            torch.Tensor: the log_prob tensor
"""
data.batch = data.batch.contiguous()
def compute_logprobs_fn(output, data):
response = data['responses']
response_length = response.size(1)
logits = output['logits']
logits = logits[:, -response_length - 1:-1].contiguous()
log_probs = vocab_parallel_log_probs_from_logits(logits, response)
return {'log_probs': log_probs}
        # We recompute the old log_prob by default here.
# TODO (zhangchi.usc1992): actually, this function should only return log_prob and this logic should be handled by user outside
recompute_old_log_prob = self.config.get('recompute_old_log_prob', True)
if recompute_old_log_prob or 'old_log_probs' not in data.batch.keys():
select_keys = ['responses', 'input_ids', 'attention_mask', 'position_ids']
batch = data.select(batch_keys=select_keys).batch
input_ids = batch['input_ids']
batch_size = input_ids.size(0)
response = batch['responses']
response_length = response.size(1)
with torch.no_grad():
output = self.forward_backward_batch(data, forward_only=True, post_process_fn=compute_logprobs_fn)
if mpu.is_pipeline_last_stage(ignore_virtual=True):
# only on last rank. It should be on every tp rank
log_probs = torch.cat([o['log_probs'] for o in output], dim=0) # (bs, seq_size)
log_probs = log_probs.to(torch.float32)
else:
log_probs = torch.empty(size=(batch_size, response_length),
dtype=torch.float32,
device=input_ids.device)
# broadcast across pp ranks
torch.distributed.broadcast(tensor=log_probs,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group(),
async_op=False)
# add empty cache after each compute
torch.cuda.empty_cache()
return log_probs
def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]:
"""Make minibatch iterator for updating the actor
Args:
data (DataProto): a DataProto containing keys
``input_ids``: tensor of shape [batch_size, sequence_length]. torch.int64, where ``sequence_length = prompt_length + response_length``
``attention_mask``: tensor of shape [batch_size, sequence_length]. torch.int64
``position_ids``: tensor of shape [batch_size, sequence_length]. torch.int64
``responses``: tensor of shape [batch_size, response_length]. torch.int64. Note that responses = input_ids[:, -response_length:]
``old_log_probs``: tensor of shape [batch_size, response_length]. torch.float32. The log probability of responses.
``advantages``: tensor of shape [batch_size, response_length]. torch.float32. The advantages of responses.
See PPO paper for details. https://arxiv.org/abs/1707.06347
Returns:
"""
select_keys = ['responses', 'input_ids', 'attention_mask', 'position_ids', 'old_log_probs', 'advantages']
data = data.select(batch_keys=select_keys)
return data.make_iterator(mini_batch_size=self.config.ppo_mini_batch_size,
epochs=self.config.ppo_epochs,
dataloader_kwargs={'shuffle': self.config.shuffle})
def forward_backward_batch(self, data: DataProto, forward_only=False, post_process_fn=None):
"""
We assume:
- The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input
- The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled
"""
# broadcast from last pp rank to all other pp ranks
# TODO: actually, we just need to control the sampling order.
broadcast_dict_tensor(data.batch,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group())
# split into micro-batches
data.batch['attention_mask'] = data.batch['attention_mask'].to(bool)
if data.meta_info.get('micro_batch_size', None) is not None:
batch_size = data.meta_info['micro_batch_size']
else:
batch_size = self.config.ppo_micro_batch_size_per_gpu
batches = split_dict_tensor_into_batches(data.batch, batch_size=batch_size)
# compute input shapes for pp stages
input_shapes = compute_transformers_input_shapes(
batches,
meta_info={
'sequence_parallel': self.megatron_config.sequence_parallel,
'hidden_size': self.model_config.hidden_size
})
n_micro_batch = len(batches)
seq_len = batches[0]['input_ids'].shape[1]
forward_backward_func = get_forward_backward_func()
def loss_func(output, data, meta_info):
if forward_only:
if post_process_fn is None:
return 1.0, {'logits': output.logits}
else:
return 1.0, post_process_fn(output, data)
responses = data['responses']
response_length = responses.size(1)
attention_mask = data['attention_mask']
response_mask = attention_mask[:, -response_length:]
old_log_prob = data['old_log_probs']
advantages = data['advantages']
clip_ratio = meta_info['clip_ratio']
entropy_coeff = meta_info['entropy_coeff']
# compute policy loss
logits = output.logits
logits = logits[:, -response_length - 1:-1].contiguous()
logits_back = logits.clone()
log_prob = vocab_parallel_log_probs_from_logits(logits, responses)
logits = logits_back
pg_loss, pg_clipfrac, ppo_kl = core_algos.compute_policy_loss(old_log_prob=old_log_prob,
log_prob=log_prob,
advantages=advantages,
eos_mask=response_mask,
cliprange=clip_ratio)
entropy_loss = vocab_parallel_compute_entropy_loss(logits, eos_mask=response_mask)
policy_loss = pg_loss - entropy_loss * entropy_coeff
# return loss and stats
stats = {
'actor/entropy_loss': entropy_loss.detach().item(),
'actor/pg_loss': pg_loss.detach().item(),
'actor/pg_clipfrac': pg_clipfrac.detach().item(),
'actor/ppo_kl': ppo_kl.detach().item()
}
return policy_loss, stats
def forward_step(batch_iter, model):
batch = next(batch_iter)
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
position_ids = batch['position_ids']
output = model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
if forward_only:
meta_info = None
else:
meta_info = {'clip_ratio': self.config.clip_ratio, 'entropy_coeff': self.config.entropy_coeff}
return output, partial(loss_func, data=batch, meta_info=meta_info)
# batch should be a list of batches inside micro-batches
batch_generator = make_batch_generator(batches, vpp_size=len(self.actor_module))
# TODO: we may use the new schedule instead
# for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
if mpu.get_pipeline_model_parallel_world_size() > 1:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.actor_module,
num_microbatches=n_micro_batch,
seq_length=batch_size * seq_len, # no use when input_shapes was set
micro_batch_size=1, # no use when input_shapes was set
forward_only=forward_only,
)
else:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.actor_module,
num_microbatches=n_micro_batch,
seq_length=batch_size * seq_len, # in use for pp = 1
micro_batch_size=1, # in use for pp = 1
forward_only=forward_only,
)
        # losses_reduced contains the stats returned from loss_func
return losses_reduced
def update_policy(self, dataloader: Iterable[DataProto]) -> Dict:
"""Update the policy with an iterator of DataProto
Args:
            dataloader (Iterable[DataProto]): an iterator over the DataProto returned by ``make_minibatch_iterator``.
                The keys of each data batch are described in ``make_minibatch_iterator``.
Returns:
Dict: a dictionary containing the statistics. Note that the statistics are only valid in the last pp stage
and users have to combine the output in each dp rank manually.
"""
metrics = {}
for data in dataloader:
# data = data.batch.to(self.actor_module.device)
self.actor_optimizer.zero_grad()
# use use_contiguous_buffers_in_local_ddp and no overlap_dp_param_comm
for chunk in self.actor_module:
# if use distributed optimizer, zero grad buffer will be handled by optimizer
chunk.zero_grad_buffer()
metric_micro_batch = self.forward_backward_batch(data)
for metric in metric_micro_batch:
append_to_dict(metrics, metric) # append the metric from this micro-batch to global metrics.
update_successful, grad_norm, num_zeros_in_grad = self.actor_optimizer.step()
if update_successful:
# allgather already execute in optimizer.step in new megatron
pass
else:
raise NotImplementedError
# add empty cache after each compute
torch.cuda.empty_cache()
return metrics
================================================
FILE: verl/workers/critic/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import BasePPOCritic
from .dp_critic import DataParallelPPOCritic
__all__ = ["BasePPOCritic", "DataParallelPPOCritic"]
================================================
FILE: verl/workers/critic/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Base class for a critic
"""
from abc import ABC, abstractmethod
import torch
from verl import DataProto
__all__ = ['BasePPOCritic']
class BasePPOCritic(ABC):
def __init__(self, config):
super().__init__()
self.config = config
@abstractmethod
def compute_values(self, data: DataProto) -> torch.Tensor:
"""Compute values"""
pass
@abstractmethod
def update_critic(self, data: DataProto):
"""Update the critic"""
pass
================================================
FILE: verl/workers/critic/dp_critic.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PPOCritic
"""
import itertools
from typing import Iterable
import torch
import torch.distributed
from torch import nn, optim
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.workers.critic import BasePPOCritic
from verl.utils.py_functional import append_to_dict
from verl.utils.torch_functional import masked_mean
from verl.utils.ulysses import ulysses_pad_and_slice_inputs, gather_outpus_and_unpad
from verl.utils.seqlen_balancing import rearrange_micro_batches, get_reverse_idx
from flash_attn.bert_padding import pad_input, unpad_input, rearrange, index_first_axis
__all__ = ['DataParallelPPOCritic']
class DataParallelPPOCritic(BasePPOCritic):
def __init__(self, config, critic_module: nn.Module, critic_optimizer: optim.Optimizer):
super().__init__(config=config)
self.critic_module = critic_module
self.critic_optimizer = critic_optimizer
self.use_remove_padding = self.config.model.get('use_remove_padding', False)
print(f'Critic use_remove_padding={self.use_remove_padding}')
self.ulysses_sequence_parallel_size = self.config.get('ulysses_sequence_parallel_size', 1)
def _forward_micro_batch(self, micro_batch):
response_length = micro_batch['responses'].size(-1)
multi_modal_inputs = {}
if 'multi_modal_inputs' in micro_batch:
for key in micro_batch['multi_modal_inputs'][0].keys():
multi_modal_inputs[key] = torch.cat([inputs[key] for inputs in micro_batch['multi_modal_inputs']],
dim=0)
with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
input_ids = micro_batch['input_ids']
batch, seqlen = input_ids.shape
attention_mask = micro_batch['attention_mask']
position_ids = micro_batch['position_ids']
if position_ids.dim() == 3: # qwen2vl mrope
position_ids = position_ids.transpose(0, 1)
if self.use_remove_padding:
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1),
attention_mask) # input_ids_rmpad (total_nnz, ...)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)
# unpad position_ids so the rotary embeddings stay aligned
if position_ids.dim() == 3:
position_ids_rmpad = index_first_axis(rearrange(position_ids, "c b s ... -> (b s) c ..."),
indices).transpose(0, 1).unsqueeze(
1) # (3, bsz, seqlen) -> (3, 1, bsz * seqlen)
else:
position_ids_rmpad = index_first_axis(rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."),
indices).transpose(0, 1)
# pad and slice the inputs if sp > 1
if self.ulysses_sequence_parallel_size > 1:
input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(input_ids_rmpad, \
position_ids_rmpad, \
sp_size=self.ulysses_sequence_parallel_size)
# only pass input_ids and position_ids to enable flash_attn_varlen
output = self.critic_module(input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids_rmpad,
**multi_modal_inputs,
use_cache=False) # prevent the model from thinking we are generating
values_rmpad = output.logits
values_rmpad = values_rmpad.squeeze(0) # (total_nnz)
# gather output if sp > 1
if self.ulysses_sequence_parallel_size > 1:
values_rmpad = gather_outpus_and_unpad(values_rmpad,
gather_dim=0,
unpad_dim=0,
padding_size=pad_size)
# pad it back
values = pad_input(values_rmpad, indices=indices, batch=batch, seqlen=seqlen).squeeze(-1)
values = values[:, -response_length - 1:-1]
else:
output = self.critic_module(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
**multi_modal_inputs,
use_cache=False) # prevent the model from thinking we are generating
values = output.logits
values = values[:, -response_length - 1:-1].squeeze(-1)
return values
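# Why the `values[:, -response_length - 1:-1]` slice above: the value of
# response token t is read from the logit at position t - 1, i.e. the state
# *before* the token is emitted. Tiny worked example (illustrative numbers):
#   seqlen = 8, response_length = 3 -> responses sit at positions 5, 6, 7
#   values[:, -4:-1] keeps logits at positions 4, 5, 6, which are exactly
#   the predictions made before each response token.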
def _optimizer_step(self):
assert self.config.grad_clip is not None
if isinstance(self.critic_module, FSDP):
grad_norm = self.critic_module.clip_grad_norm_(self.config.grad_clip)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.critic_module.parameters(), max_norm=self.config.grad_clip)
self.critic_optimizer.step()
return grad_norm
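# NOTE: FSDP.clip_grad_norm_ must be used on an FSDP module because gradients
# are sharded across ranks; it reduces the partial norms into the true global
# norm before clipping. Calling torch.nn.utils.clip_grad_norm_ on sharded
# parameters would clip against the (understated) local-shard norm only.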
def compute_values(self, data: DataProto) -> torch.Tensor:
self.critic_module.eval()
micro_batch_size = data.meta_info['micro_batch_size']
select_keys = ['responses', 'input_ids', 'attention_mask', 'position_ids']
batch = data.select(batch_keys=select_keys).batch
use_dynamic_bsz = data.meta_info['use_dynamic_bsz']
has_multi_modal_inputs = 'multi_modal_inputs' in data.non_tensor_batch.keys()
if has_multi_modal_inputs:
num_micro_batches = data.batch.batch_size[0] // micro_batch_size
non_tensor_select_keys = ['multi_modal_inputs']
micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
elif use_dynamic_bsz:
# split using dynamic bsz
max_token_len = data.meta_info['max_token_len'] * self.ulysses_sequence_parallel_size
micro_batches, indices = rearrange_micro_batches(batch=batch, max_token_len=max_token_len)
else:
micro_batches = batch.split(micro_batch_size)
values_lst = []
for micro_batch in micro_batches:
if isinstance(micro_batch, DataProto):
micro_batch = {**micro_batch.batch, **micro_batch.non_tensor_batch}
with torch.no_grad():
values = self._forward_micro_batch(micro_batch)
values_lst.append(values)
values = torch.concat(values_lst, dim=0)
responses = data.batch['responses']
attention_mask = data.batch['attention_mask']
response_length = responses.size(1)
values = values * attention_mask[:, -response_length - 1:-1]
if use_dynamic_bsz:
indices = list(itertools.chain.from_iterable(indices))
assert len(indices) == values.size(0), f"{len(indices)} vs. {values.size()}"
revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
values = values[revert_indices]
return values
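# How the dynamic-bsz reordering above works (illustrative numbers):
# rearrange_micro_batches permutes samples to balance token counts and returns
# the permutation as `indices`, so row k of the concatenated `values` holds
# original sample indices[k]. For example:
#   indices (flattened)  = [2, 0, 3, 1]
#   get_reverse_idx(...) = [1, 3, 0, 2]   # reverse[j] = row holding sample j
#   values[[1, 3, 0, 2]] restores the original order [v0, v1, v2, v3].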
def update_critic(self, data: DataProto):
# make sure we are in training mode
self.critic_module.train()
metrics = {}
select_keys = ['input_ids', 'responses', 'attention_mask', 'position_ids', 'values', 'returns']
batch = data.select(batch_keys=select_keys).batch
has_multi_modal_inputs = 'multi_modal_inputs' in data.non_tensor_batch.keys()
# Split to make a mini-batch iterator for updating the critic
# See PPO paper for details. https://arxiv.org/abs/1707.06347
if has_multi_modal_inputs:
num_mini_batches = data.batch.batch_size[0] // self.config.ppo_mini_batch_size
non_tensor_select_keys = ['multi_modal_inputs']
dataloader = data.select(select_keys, non_tensor_select_keys).chunk(num_mini_batches)
else:
dataloader = batch.split(self.config.ppo_mini_batch_size)
for epoch in range(self.config.ppo_epochs):
for batch_idx, data in enumerate(dataloader):
# split batch into micro_batches
mini_batch = data
if has_multi_modal_inputs:
num_micro_batches = mini_batch.batch.batch_size[0] // self.config.ppo_micro_batch_size_per_gpu
micro_batches = data.select(select_keys, non_tensor_select_keys).chunk(num_micro_batches)
elif self.config.use_dynamic_bsz:
max_token_len = self.config.ppo_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
micro_batches, _ = rearrange_micro_batches(batch=mini_batch, max_token_len=max_token_len)
else:
micro_batches = mini_batch.split(self.config.ppo_micro_batch_size_per_gpu)
self.gradient_accumulation = self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu
self.critic_optimizer.zero_grad()
for data in micro_batches:
if isinstance(data, DataProto):
data = {**data.batch.cuda(), **data.non_tensor_batch}
else:
data = data.cuda() # critic device is cpu when using offload
input_ids = data['input_ids']
responses = data['responses']
attention_mask = data['attention_mask']
position_ids = data['position_ids']
values = data['values']
returns = data['returns']
response_length = responses.size(1)
eos_mask = attention_mask[:, -response_length - 1:-1]
vpreds = self._forward_micro_batch(data)
# assert not torch.any(torch.isnan(vpreds)).item()
vf_loss, vf_clipfrac = core_algos.compute_value_loss(vpreds=vpreds,
values=values,
returns=returns,
eos_mask=eos_mask,
cliprange_value=self.config.cliprange_value)
if self.config.use_dynamic_bsz:
# relative to the dynamic bsz
loss = vf_loss * (len(data) / self.config.ppo_mini_batch_size)
else:
loss = vf_loss / self.gradient_accumulation
loss.backward()
data = {
'critic/vf_loss': vf_loss.detach().item(),
'critic/vf_clipfrac': vf_clipfrac.detach().item(),
'critic/vpred_mean': masked_mean(vpreds, eos_mask).detach().item(),
}
append_to_dict(metrics, data)
grad_norm = self._optimizer_step()
data = {'critic/grad_norm': grad_norm.detach().item()}
append_to_dict(metrics, data)
self.critic_optimizer.zero_grad()
return metrics
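# --- Hypothetical usage sketch (not part of verl; names invented) ---
# CriticWorker wires this class up roughly as follows:
#
# critic = DataParallelPPOCritic(config=critic_cfg,          # OmegaConf node
#                                critic_module=fsdp_module,  # FSDP value model
#                                critic_optimizer=adamw)
# data.meta_info.update({'micro_batch_size': 4, 'use_dynamic_bsz': False})
# values = critic.compute_values(data)       # (bs, response_length)
# metrics = critic.update_critic(ppo_batch)  # {'critic/vf_loss': [...], ...}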
================================================
FILE: verl/workers/critic/megatron_critic.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implement a multiprocess PPOCritic
"""
import importlib
from functools import partial
from packaging.version import Version
from typing import Iterable
import torch
import torch.distributed
from omegaconf import OmegaConf
from torch import nn
from verl import DataProto
from verl.trainer.ppo import core_algos
from verl.workers.critic import BasePPOCritic
from verl.utils.megatron.pipeline_parallel import (compute_transformers_input_shapes, make_batch_generator)
from verl.utils.py_functional import append_to_dict
from verl.utils.torch_dtypes import PrecisionType
from verl.utils.torch_functional import masked_mean, broadcast_dict_tensor, split_dict_tensor_into_batches
from verl.utils.megatron import sequence_parallel as sp_utils
from megatron.core.optimizer import OptimizerConfig
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel import get_forward_backward_func
from megatron.core.optimizer import DistributedOptimizer
class MegatronPPOCritic(BasePPOCritic):
def __init__(self, config, model_config, megatron_config, critic_module: nn.ModuleList,
critic_optimizer: DistributedOptimizer, critic_optimizer_config: OptimizerConfig):
super().__init__(config=config)
self._validate_config(config)
self.model_config = model_config
self.megatron_config = megatron_config
self.critic_module = critic_module
self.critic_optimizer = critic_optimizer
self.critic_optimizer_config = critic_optimizer_config
# we create a separate config object for the optimizer step so that global args won't affect it.
self.optimizer_step_args = OmegaConf.create({
'skip_grad': None,
'overlap_dp_param_comm': False,
'overlap_dp_grad_comm': False,
'gradient_accumulation_steps': 1,
'sequence_parallel': self.megatron_config.sequence_parallel,
'DDP_impl': 'local',
'layernorm_allreduce_bucket_threshold': 0,
'pipeline_model_parallel_split_rank': None,
'reduce_grads_use_alltoall': False
})
if self.config.kl_ctrl.type == 'fixed':
self.kl_ctrl = core_algos.FixedKLController(kl_coef=self.config.kl_ctrl.kl_coef)
elif self.config.kl_ctrl.type == 'adaptive':
assert self.config.kl_ctrl.horizon > 0, f'horizon must be larger than 0. Got {self.config.kl_ctrl.horizon}'
self.kl_ctrl = core_algos.AdaptiveKLController(init_kl_coef=self.config.kl_ctrl.kl_coef,
target_kl=self.config.kl_ctrl.target_kl,
horizon=self.config.kl_ctrl.horizon)
else:
raise NotImplementedError
def _validate_config(self, config) -> None:
"""Validate config options not implemented for Megatron backend"""
assert config.get('ulysses_sequence_parallel_size', 1) == 1
def compute_values(self, data: DataProto) -> DataProto:
# data.batch = data.batch.to(self.critic_module.module.device)
responses = data.batch['responses']
attention_mask = data.batch['attention_mask']
response_length = responses.size(1)
with torch.no_grad():
output = self.forward_backward_batch(data=data, forward_only=True)
if mpu.is_pipeline_last_stage(ignore_virtual=True):
# only computed on the last pp rank; every tp rank holds the same values
values = torch.cat([o['vpreds'] for o in output], dim=0) # (bs, seq_len)
values = values.to(torch.float32)
else:
values = torch.empty_like(attention_mask, dtype=torch.float32)
# each tp rank should contain the same values
values = values * attention_mask
values = values[:, -response_length - 1:-1]
values = values.contiguous()
# sync among pp ranks
torch.distributed.broadcast(tensor=values,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group())
# add empty cache after each compute
torch.cuda.empty_cache()
return values
def make_minibatch_iterator(self, data: DataProto) -> Iterable[DataProto]:
select_keys = ['input_ids', 'responses', 'attention_mask', 'position_ids', 'values', 'returns']
data = data.select(batch_keys=select_keys)
return data.make_iterator(mini_batch_size=self.config.ppo_mini_batch_size,
epochs=self.config.ppo_epochs,
dataloader_kwargs={'shuffle': self.config.shuffle})
def forward_backward_batch(self, data: DataProto, forward_only=False):
# broadcast from last pp rank to all other pp ranks
data.batch = data.batch.contiguous()
broadcast_dict_tensor(data.batch,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group())
# split into micro-batches
data.batch['attention_mask'] = data.batch['attention_mask'].to(bool)
batches = split_dict_tensor_into_batches(data.batch, batch_size=self.config.ppo_micro_batch_size_per_gpu)
n_micro_batch = len(batches)
seq_len = batches[0]['input_ids'].shape[1]
# compute input shapes for pp stages
input_shapes = compute_transformers_input_shapes(
batches,
meta_info={
'sequence_parallel': self.megatron_config.sequence_parallel,
'hidden_size': self.model_config.hidden_size
})
forward_backward_func = get_forward_backward_func()
def loss_func(output, data, meta_info):
if forward_only:
return 1.0, {'vpreds': output.logits}
responses = data['responses']
attention_mask = data['attention_mask']
values = data['values']
returns = data['returns']
response_length = responses.size(1)
eos_mask = attention_mask[:, -response_length:]
cliprange_value = self.config.cliprange_value
vpreds = output.logits # (bs, sequence_length)
vpreds = vpreds[:, -response_length - 1:-1]
vf_loss, vf_clipfrac = core_algos.compute_value_loss(vpreds=vpreds,
values=values,
returns=returns,
eos_mask=eos_mask,
cliprange_value=cliprange_value)
stats = {
'critic/vf_loss': vf_loss.detach().item(),
'critic/vf_clipfrac': vf_clipfrac.detach().item(),
'critic/vpred_mean': masked_mean(vpreds, eos_mask).detach().item(),
}
return vf_loss, stats
def forward_step(batch_iter, model):
batch = next(batch_iter)
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
position_ids = batch['position_ids']
output = model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
return output, partial(loss_func, data=batch, meta_info={})
# make_batch_generator yields the micro-batches (one iterator per virtual pipeline stage)
batch_generator = make_batch_generator(batches, vpp_size=len(self.critic_module))
# TODO: we may use the new schedule instead
# for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
if mpu.get_pipeline_model_parallel_world_size() > 1:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.critic_module,
num_microbatches=n_micro_batch,
seq_length=self.config.ppo_micro_batch_size_per_gpu * seq_len, # unused when input_shapes is set
micro_batch_size=1, # unused when input_shapes is set
forward_only=forward_only,
)
else:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.critic_module,
num_microbatches=n_micro_batch,
seq_length=self.config.ppo_micro_batch_size_per_gpu * seq_len, # used when pp == 1
micro_batch_size=1, # used when pp == 1
forward_only=forward_only,
)
# losses_reduced contains the stats returned from loss_func
return losses_reduced
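# Structure of the return value, for reference: a list with one entry per
# micro-batch. With forward_only=True each entry is {'vpreds': logits} and
# compute_values concatenates them along the batch dim; during training each
# entry is the stats dict built in loss_func, merged into `metrics` by
# update_critic below.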
def update_critic(self, dataloader: Iterable[DataProto]):
metrics = {}
for data in dataloader:
# data = data.batch.to(self.critic_module.device)
self.critic_optimizer.zero_grad()
# relies on use_contiguous_buffers_in_local_ddp with overlap_dp_param_comm disabled
for chunk in self.critic_module:
chunk.zero_grad_buffer()
metric_micro_batch = self.forward_backward_batch(data)
update_successful, grad_norm, num_zeros_in_grad = self.critic_optimizer.step()
if update_successful:
# allgather is already executed inside optimizer.step in recent Megatron versions
pass
else:
raise NotImplementedError
for metric in metric_micro_batch:
append_to_dict(metrics, metric) # append the metric from this micro-batch to global metrics.
# add empty cache after each compute
torch.cuda.empty_cache()
return metrics
================================================
FILE: verl/workers/fsdp_workers.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main entry point to run the PPO algorithm
"""
import logging
import os
import warnings
import torch
import torch.distributed
from torch.distributed.device_mesh import init_device_mesh
import verl.utils.torch_functional as verl_F
from omegaconf import DictConfig, open_dict
from verl import DataProto
from verl.single_controller.base import Worker
from verl.single_controller.base.decorator import register, Dispatch
from verl.utils import hf_tokenizer, hf_processor
from verl.utils.debug import log_gpu_memory_usage
from verl.utils.fs import copy_to_local
from verl.utils.fsdp_utils import get_fsdp_wrap_policy, init_fn, get_init_weight_context_manager
from verl.utils.fsdp_utils import offload_fsdp_optimizer, offload_fsdp_model_to_cpu, load_fsdp_optimizer, \
load_fsdp_model_to_gpu
from verl.utils.import_utils import import_external_libs
from verl.utils.model import compute_position_id_with_mask
from verl.utils.flops_counter import FlopsCounter
from verl.utils.checkpoint.fsdp_checkpoint_manager import FSDPCheckpointManager
from verl.workers.sharding_manager.fsdp_ulysses import FSDPUlyssesShardingManager
from codetiming import Timer
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv('VERL_PPO_LOGGING_LEVEL', 'WARN'))
def create_device_mesh(world_size, fsdp_size):
if fsdp_size < 0 or fsdp_size >= world_size:
device_mesh = init_device_mesh('cuda', mesh_shape=(world_size,), mesh_dim_names=['fsdp'])
else:
raise ValueError(
'HSDP is not supported yet because it produces incorrect results for now. Please set fsdp_size=-1')
assert world_size % fsdp_size == 0
device_mesh = init_device_mesh('cuda',
mesh_shape=(world_size // fsdp_size, fsdp_size),
mesh_dim_names=['ddp', 'fsdp'])
return device_mesh
def get_sharding_strategy(device_mesh):
from torch.distributed.fsdp import ShardingStrategy
if device_mesh.ndim == 1:
sharding_strategy = ShardingStrategy.FULL_SHARD
elif device_mesh.ndim == 2:
sharding_strategy = ShardingStrategy.HYBRID_SHARD
else:
raise NotImplementedError(f"Get device mesh ndim={device_mesh.ndim}, but only support 1 or 2")
return sharding_strategy
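# Worked example (illustrative numbers): world_size=8 with fsdp_size=-1 yields
# a 1-D mesh of shape (8,), so get_sharding_strategy returns FULL_SHARD
# (ZeRO-3 style). A 2-D mesh such as (2, 4) would map to HYBRID_SHARD, but
# that path is currently disabled in create_device_mesh because HSDP produces
# incorrect results.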
class ActorRolloutRefWorker(Worker):
"""
This worker can be instantiated as a standalone actor, a standalone rollout, a standalone
reference policy, or a hybrid engine, depending on config.rollout
"""
def __init__(self, config: DictConfig, role: str):
super().__init__()
self.config = config
import torch.distributed
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl")
# build device mesh for FSDP
world_size = torch.distributed.get_world_size()
# TODO(sgm): support FSDP hybrid shard for larger model
self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=self.config.actor.fsdp_config.fsdp_size)
# build device mesh for Ulysses Sequence Parallel
self.ulysses_device_mesh = None
self.ulysses_sequence_parallel_size = self.config.actor.get('ulysses_sequence_parallel_size', 1)
dp = world_size // self.ulysses_sequence_parallel_size
if self.ulysses_sequence_parallel_size > 1:
self.ulysses_device_mesh = init_device_mesh('cuda',
mesh_shape=(dp, self.ulysses_sequence_parallel_size),
mesh_dim_names=['dp', 'sp'])
self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
self.role = role
assert self.role in ['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']
self._is_actor = self.role in ['actor', 'actor_rollout', 'actor_rollout_ref']
self._is_rollout = self.role in ['rollout', 'actor_rollout', 'actor_rollout_ref']
self._is_ref = self.role in ['ref', 'actor_rollout_ref']
self._is_offload_param = False
self._is_offload_optimizer = False
if self._is_actor:
self._is_offload_param = self.config.actor.fsdp_config.get('param_offload', False)
self._is_offload_optimizer = self.config.actor.fsdp_config.get('optimizer_offload', False)
elif self._is_ref:
# TODO: it seems that manual offload is slower than FSDP offload
self._is_offload_param = self.config.ref.fsdp_config.get('param_offload', False)
# normalize config
if self._is_actor:
self.config.actor.ppo_mini_batch_size *= self.config.rollout.n
self.config.actor.ppo_mini_batch_size //= (self.device_mesh.shape[0] // self.ulysses_sequence_parallel_size)
# micro bsz
if self.config.actor.ppo_micro_batch_size is not None:
self.config.actor.ppo_micro_batch_size //= (self.device_mesh.shape[0] //
self.ulysses_sequence_parallel_size)
self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size
assert self.config.actor.ppo_mini_batch_size % self.config.actor.ppo_micro_batch_size_per_gpu == 0, \
f'normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be divisible by ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}'
assert self.config.actor.ppo_mini_batch_size // self.config.actor.ppo_micro_batch_size_per_gpu > 0, \
f'normalized ppo_mini_batch_size {self.config.actor.ppo_mini_batch_size} should be larger than ppo_micro_batch_size_per_gpu {self.config.actor.ppo_micro_batch_size_per_gpu}'
# normalize rollout config
if self._is_rollout and self.config.rollout.log_prob_micro_batch_size is not None:
self.config.rollout.log_prob_micro_batch_size //= (self.device_mesh.shape[0] //
self.ulysses_sequence_parallel_size)
self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size
# normalize ref config
if self._is_ref and self.config.ref.log_prob_micro_batch_size is not None:
self.config.ref.log_prob_micro_batch_size //= (self.device_mesh.shape[0] //
self.ulysses_sequence_parallel_size)
self.config.ref.log_prob_micro_batch_size_per_gpu = self.config.ref.log_prob_micro_batch_size
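# Worked example of the normalization above (numbers invented for
# illustration): world_size = 8, ulysses_sequence_parallel_size = 2 -> 4 dp
# ranks. With rollout.n = 5 and a global ppo_mini_batch_size of 256 prompts:
#   256 * 5 = 1280 responses, // 4 dp ranks = 320 per-rank mini-batch
# With ppo_micro_batch_size = 8: 8 // 4 = 2 per GPU, 320 % 2 == 0 holds,
# and each mini-batch update accumulates over 320 // 2 = 160 micro-steps.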
def _build_model_optimizer(self,
model_path,
fsdp_config,
optim_config,
override_model_config,
use_remove_padding=False,
enable_gradient_checkpointing=False,
trust_remote_code=False,
use_liger=False,
role='actor'):
from verl.utils.model import print_model_size, update_model_config, get_generation_config
from verl.utils.torch_dtypes import PrecisionType
from transformers import AutoModelForCausalLM, AutoConfig, AutoModelForVision2Seq
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy, MixedPrecision, CPUOffload
from torch import optim
assert role in ['actor', 'ref']
log_gpu_memory_usage('Before init from HF AutoModel', logger=logger)
local_path = copy_to_local(model_path)
# note that we have to create the model in fp32; otherwise the optimizer states would be in bf16, which is incorrect
# TODO(zhangchi.usc1992): 1. support create from random initialized model. 2. Support init with FSDP directly
self.tokenizer = hf_tokenizer(local_path, trust_remote_code=trust_remote_code)
self.processor = hf_processor(local_path, trust_remote_code=trust_remote_code)
torch_dtype = fsdp_config.get('model_dtype', None)
if torch_dtype is None:
torch_dtype = torch.float32 if self._is_actor else torch.bfloat16
else:
torch_dtype = PrecisionType.to_dtype(torch_dtype)
# override model kwargs
actor_model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code)
self.generation_config = get_generation_config(local_path, trust_remote_code=trust_remote_code)
if use_remove_padding:
from verl.models.registry import check_model_support_rmpad
check_model_support_rmpad(actor_model_config.model_type)
if use_remove_padding and self.ulysses_sequence_parallel_size > 1:
from verl.models.transformers.monkey_patch import apply_monkey_patch
apply_monkey_patch(actor_model_config, verbose=True)
override_config_kwargs = {
'bos_token_id': self.tokenizer.bos_token_id,
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
}
override_config_kwargs.update(override_model_config)
update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs)
if self.rank == 0:
print(f'Model config after override: {actor_model_config}')
# NOTE(fix me): tie_word_embedding causes meta_tensor init to hang
init_context = get_init_weight_context_manager(use_meta_tensor=not actor_model_config.tie_word_embeddings)
with init_context(), warnings.catch_warnings():
warnings.simplefilter("ignore")
if type(actor_model_config) in AutoModelForVision2Seq._model_mapping.keys():
actor_module_class = AutoModelForVision2Seq
else:
actor_module_class = AutoModelForCausalLM
actor_module = actor_module_class.from_pretrained(pretrained_model_name_or_path=local_path,
torch_dtype=torch_dtype,
config=actor_model_config,
attn_implementation='flash_attention_2',
trust_remote_code=trust_remote_code)
# Apply Liger kernel to the model if use_liger is set to True
if use_liger:
from liger_kernel.transformers.monkey_patch import _apply_liger_kernel_to_instance
_apply_liger_kernel_to_instance(model=actor_module)
# some parameters may not be in torch_dtype. TODO(zhangchi.usc1992): remove this after we switch to fsdp2
actor_module.to(torch_dtype)
if enable_gradient_checkpointing:
actor_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={'use_reentrant': False})
torch.distributed.barrier()
if self.rank == 0:
print_model_size(actor_module)
log_gpu_memory_usage('After init from HF AutoModel', logger=logger)
# We wrap FSDP for rollout as well
mixed_precision_config = fsdp_config.get('mixed_precision', None)
if mixed_precision_config is not None:
param_dtype = PrecisionType.to_dtype(mixed_precision_config.get('param_dtype', 'bf16'))
reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get('reduce_dtype', 'fp32'))
buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get('buffer_dtype', 'fp32'))
else:
param_dtype = torch.bfloat16
reduce_dtype = torch.float32
buffer_dtype = torch.float32
mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype)
auto_wrap_policy = get_fsdp_wrap_policy(module=actor_module, config=fsdp_config.get('wrap_policy', None))
if self._is_rollout and self.config.rollout.name == 'hf':
# TODO(zhangchi.usc1992, shengguangming) fix me. Currently, auto_wrap_policy causes HFRollout to hang on Gemma
auto_wrap_policy = None
print(f'wrap_policy: {auto_wrap_policy}')
fsdp_mesh = self.device_mesh
sharding_strategy = get_sharding_strategy(fsdp_mesh)
# TODO: add transformer policy
# We force the reference policy to use CPUOffload to save memory.
# We force-disable CPUOffload for the actor because it causes incorrect results with gradient accumulation
cpu_offload = None if role == 'actor' else CPUOffload(offload_params=True)
actor_module_fsdp = FSDP(
actor_module,
cpu_offload=cpu_offload,
param_init_fn=init_fn,
use_orig_params=False,
auto_wrap_policy=auto_wrap_policy,
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy, # zero3
mixed_precision=mixed_precision,
sync_module_states=True,
device_mesh=self.device_mesh,
forward_prefetch=False)
log_gpu_memory_usage('After Actor FSDP init', logger=logger)
# TODO: add more optimizer args into config
if role == 'actor':
from verl.utils.torch_functional import get_constant_schedule_with_warmup
actor_optimizer = optim.AdamW(actor_module_fsdp.parameters(),
lr=optim_config.lr,
betas=optim_config.get('betas', (0.9, 0.999)),
weight_decay=optim_config.get('weight_decay', 1e-2))
total_steps = optim_config.get('total_training_steps', 0)
num_warmup_steps_ratio = optim_config.get('lr_warmup_steps_ratio', 0.)
num_warmup_steps = int(num_warmup_steps_ratio * total_steps)
print(f'Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}')
actor_lr_scheduler = get_constant_schedule_with_warmup(optimizer=actor_optimizer,
num_warmup_steps=num_warmup_steps)
else:
actor_optimizer = None
actor_lr_scheduler = None
log_gpu_memory_usage('After actor optimizer init', logger=logger)
return actor_module_fsdp, actor_optimizer, actor_lr_scheduler, actor_model_config
def _build_rollout(self):
from torch.distributed.device_mesh import init_device_mesh
# TODO(sgm): support FSDP hybrid shard for larger model
infer_tp = self.config.rollout.tensor_model_parallel_size
dp = self.world_size // infer_tp
assert self.world_size % infer_tp == 0, f'rollout world_size: {self.world_size} is not divisible by infer_tp: {infer_tp}'
rollout_device_mesh = init_device_mesh('cuda', mesh_shape=(dp, infer_tp), mesh_dim_names=['dp', 'infer_tp'])
if self.config.rollout.name == 'hf':
from verl.workers.rollout import HFRollout
from verl.workers.sharding_manager import BaseShardingManager
rollout = HFRollout(module=self.actor_module_fsdp, config=self.config.rollout)
rollout_sharding_manager = BaseShardingManager()
# TODO: a sharding manager that does nothing?
elif self.config.rollout.name == 'vllm':
if self.config.rollout.use_fire_sampling:
from verl.workers.rollout.vllm_rollout import FIREvLLMRollout as vLLMRollout
from verl.workers.rollout.vllm_rollout import vllm_mode
else:
from verl.workers.rollout.vllm_rollout import vLLMRollout, vllm_mode
from verl.workers.sharding_manager import FSDPVLLMShardingManager
log_gpu_memory_usage('Before building vllm rollout', logger=None)
local_path = copy_to_local(self.config.model.path)
if vllm_mode == 'customized':
rollout = vLLMRollout(actor_module=self.actor_module_fsdp,
config=self.config.rollout,
tokenizer=self.tokenizer,
model_hf_config=self.actor_model_config)
elif vllm_mode == 'spmd':
rollout = vLLMRollout(model_path=local_path,
config=self.config.rollout,
tokenizer=self.tokenizer,
model_hf_config=self.actor_model_config,
device_mesh=rollout_device_mesh)
else:
raise NotImplementedError("vllm_mode must be 'customized' or 'spmd'")
log_gpu_memory_usage('After building vllm rollout', logger=None)
if torch.distributed.get_world_size() == 1:
self.config.rollout.load_format = 'dummy_hf'
rollout_sharding_manager = FSDPVLLMShardingManager(module=self.actor_module_fsdp,
inference_engine=rollout.inference_engine,
model_config=self.actor_model_config,
full_params='hf' in self.config.rollout.load_format,
device_mesh=rollout_device_mesh)
log_gpu_memory_usage('After building sharding manager', logger=None)
return rollout, rollout_sharding_manager
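# Worked example of the rollout mesh (illustrative numbers): world_size = 8
# with rollout.tensor_model_parallel_size = 2 gives mesh_shape (4, 2), i.e.
# four data-parallel replicas of a 2-way tensor-parallel vLLM engine, each
# synced from the FSDP actor weights by FSDPVLLMShardingManager.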
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
from verl.workers.actor import DataParallelPPOActor
# This is used to import external_lib into the huggingface systems
import_external_libs(self.config.model.get('external_lib', None))
from omegaconf import OmegaConf
override_model_config = OmegaConf.to_container(self.config.model.get('override_config', OmegaConf.create()))
use_remove_padding = self.config.model.get('use_remove_padding', False)
if self._is_actor or self._is_rollout:
# we need the model for actor and rollout
if self._is_actor:
optim_config = self.config.actor.optim
fsdp_config = self.config.actor.fsdp_config
else:
optim_config = None
fsdp_config = OmegaConf.create()
self.actor_module_fsdp, self.actor_optimizer, self.actor_lr_scheduler, self.actor_model_config = self._build_model_optimizer(
model_path=self.config.model.path,
fsdp_config=fsdp_config,
optim_config=optim_config,
override_model_config=override_model_config,
use_remove_padding=use_remove_padding,
enable_gradient_checkpointing=self.config.model.get('enable_gradient_checkpointing', False),
trust_remote_code=self.config.model.get('trust_remote_code', False),
use_liger=self.config.model.get('use_liger', False),
role='actor')
# get the original unwrapped module
self.actor_module = self.actor_module_fsdp._fsdp_wrapped_module
if self._is_offload_optimizer:
offload_fsdp_optimizer(optimizer=self.actor_optimizer)
log_gpu_memory_usage('After offload actor optimizer during init', logger=logger)
# load from checkpoint
if self._is_actor:
OmegaConf.set_struct(self.config.actor, True)
with open_dict(self.config.actor):
self.config.actor.use_remove_padding = use_remove_padding
self.actor = DataParallelPPOActor(config=self.config.actor,
actor_module=self.actor_module_fsdp,
actor_optimizer=self.actor_optimizer)
if self._is_rollout:
self.rollout, self.rollout_sharding_manager = self._build_rollout()
if self._is_ref:
self.ref_module_fsdp = self._build_model_optimizer(model_path=self.config.model.path,
fsdp_config=self.config.ref.fsdp_config,
optim_config=None,
override_model_config=override_model_config,
use_remove_padding=use_remove_padding,
trust_remote_code=self.config.model.get(
'trust_remote_code', False),
use_liger=self.config.model.get('use_liger', False),
role='ref')[0]
OmegaConf.set_struct(self.config.ref, True)
with open_dict(self.config.ref):
self.config.ref.use_remove_padding = use_remove_padding
self.ref_policy = DataParallelPPOActor(config=self.config.ref, actor_module=self.ref_module_fsdp)
if self._is_actor:
self.flops_counter = FlopsCounter(self.actor_model_config)
self.checkpoint_manager = FSDPCheckpointManager(
model=self.actor_module_fsdp,
optimizer=self.actor.actor_optimizer,
lr_scheduler=self.actor_lr_scheduler,
processing_class=self.processor if self.processor is not None else self.tokenizer)
torch.cuda.empty_cache()
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def update_actor(self, data: DataProto):
data = data.to('cuda')
assert self._is_actor
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
load_fsdp_optimizer(optimizer=self.actor_optimizer, device_id=torch.cuda.current_device())
data.batch = data.batch.cuda()
log_gpu_memory_usage('Before update policy', logger=logger)
with self.ulysses_sharding_manager:
data = self.ulysses_sharding_manager.preprocess_data(data=data)
# perform training
with Timer(name='update_policy', logger=None) as timer:
metrics = self.actor.update_policy(data=data)
delta_time = timer.last
global_num_tokens = data.meta_info['global_token_num']
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
metrics['mfu/actor'] = estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size
self.actor_lr_scheduler.step()
lr = self.actor_lr_scheduler.get_last_lr()[0]
metrics['actor/lr'] = lr
log_gpu_memory_usage('After update policy', logger=logger)
# TODO: here, we should return all metrics
output = DataProto(meta_info={'metrics': metrics})
output = self.ulysses_sharding_manager.postprocess_data(data=output)
output = output.to('cpu')
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
offload_fsdp_optimizer(optimizer=self.actor_optimizer)
torch.cuda.empty_cache()
return output
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def generate_sequences(self, prompts: DataProto):
prompts = prompts.to('cuda')
assert self._is_rollout
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
prompts.batch = prompts.batch.cuda()
meta_info = {
'eos_token_id':
self.generation_config.eos_token_id
if self.generation_config is not None else self.tokenizer.eos_token_id,
'pad_token_id':
self.generation_config.pad_token_id
if self.generation_config is not None else self.tokenizer.pad_token_id,
}
prompts.meta_info.update(meta_info)
with self.rollout_sharding_manager:
# after parameters sync with rollout, offload actor model to CPU
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
offload_fsdp_optimizer(optimizer=self.actor_optimizer)
log_gpu_memory_usage('After entering rollout sharding manager', logger=logger)
prompts = self.rollout_sharding_manager.preprocess_data(prompts)
output = self.rollout.generate_sequences(prompts=prompts)
log_gpu_memory_usage('After rollout generation', logger=logger)
output = self.rollout_sharding_manager.postprocess_data(output)
output = output.to('cpu')
# clear kv cache
torch.cuda.empty_cache()
log_gpu_memory_usage('After generate_sequences', logger=logger)
return output
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def compute_log_prob(self, data: DataProto):
assert self._is_actor
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
data = data.to('cuda')
# we should always recompute old_log_probs when using HybridEngine
data.meta_info['micro_batch_size'] = self.config.rollout.log_prob_micro_batch_size_per_gpu
data.meta_info['max_token_len'] = self.config.rollout.log_prob_max_token_len_per_gpu
data.meta_info['use_dynamic_bsz'] = self.config.rollout.log_prob_use_dynamic_bsz
data.meta_info['temperature'] = self.config.rollout.temperature
# perform recompute log_prob
with self.ulysses_sharding_manager:
data = self.ulysses_sharding_manager.preprocess_data(data)
output = self.actor.compute_log_prob(data=data)
output = DataProto.from_dict(tensors={'old_log_probs': output},
meta_info={'temperature': self.config.rollout.temperature})
output = self.ulysses_sharding_manager.postprocess_data(output)
output = output.to('cpu')
# https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
# unshard the root FSDP module
if self.world_size > 1:
self.actor.actor_module._handle.reshard(True)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
# clear kv cache
torch.cuda.empty_cache()
log_gpu_memory_usage('After compute_log_prob', logger=logger)
return output
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def compute_ref_log_prob(self, data: DataProto):
assert self._is_ref
data = data.to('cuda')
micro_batch_size = self.config.ref.log_prob_micro_batch_size_per_gpu
data.meta_info['micro_batch_size'] = micro_batch_size
data.meta_info['temperature'] = self.config.rollout.temperature
data.meta_info['max_token_len'] = self.config.ref.log_prob_max_token_len_per_gpu
data.meta_info['use_dynamic_bsz'] = self.config.ref.log_prob_use_dynamic_bsz
with self.ulysses_sharding_manager:
data = self.ulysses_sharding_manager.preprocess_data(data)
output = self.ref_policy.compute_log_prob(data=data)
output = DataProto.from_dict(tensors={'ref_log_prob': output})
output = self.ulysses_sharding_manager.postprocess_data(output)
output = output.to('cpu')
# https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
# unshard the root FSDP module
if self.world_size > 1:
self.ref_policy.actor_module._handle.reshard(True)
torch.cuda.empty_cache()
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, remove_previous_ckpt=False):
# only support save and load ckpt for actor
assert self._is_actor
import torch
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
self.checkpoint_manager.save_checkpoint(local_path=local_path,
hdfs_path=hdfs_path,
global_step=global_step,
remove_previous_ckpt=remove_previous_ckpt)
torch.distributed.barrier()
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, path, del_local_after_load=False):
if self._is_offload_param:
load_fsdp_model_to_gpu(self.actor_module_fsdp)
self.checkpoint_manager.load_checkpoint(path=path, del_local_after_load=del_local_after_load)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.actor_module_fsdp)
if self._is_offload_optimizer:
offload_fsdp_optimizer(self.actor_optimizer)
class CriticWorker(Worker):
def __init__(self, config):
super().__init__()
import torch.distributed
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl")
self.config = config
# build device mesh for Ulysses Sequence Parallel
world_size = torch.distributed.get_world_size()
from torch.distributed.device_mesh import init_device_mesh
fsdp_size = self.config.model.fsdp_config.fsdp_size
self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)
self.ulysses_device_mesh = None
self.ulysses_sequence_parallel_size = self.config.get('ulysses_sequence_parallel_size', 1)
dp = world_size // self.ulysses_sequence_parallel_size
if self.ulysses_sequence_parallel_size > 1:
self.ulysses_device_mesh = init_device_mesh('cuda',
mesh_shape=(dp, self.ulysses_sequence_parallel_size),
mesh_dim_names=['dp', 'sp'])
self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
# set FSDP offload params
self._is_offload_param = self.config.model.fsdp_config.param_offload
self._is_offload_optimizer = self.config.model.fsdp_config.optimizer_offload
# normalize config
self.config.ppo_mini_batch_size //= (torch.distributed.get_world_size() // self.ulysses_sequence_parallel_size)
if self.config.ppo_micro_batch_size is not None:
self.config.ppo_micro_batch_size //= (torch.distributed.get_world_size() //
self.ulysses_sequence_parallel_size)
self.config.forward_micro_batch_size //= (torch.distributed.get_world_size() //
self.ulysses_sequence_parallel_size)
self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size
self.config.forward_micro_batch_size_per_gpu = self.config.forward_micro_batch_size
assert self.config.ppo_mini_batch_size % self.config.ppo_micro_batch_size_per_gpu == 0, \
f'normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be divisible by ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}'
assert self.config.ppo_mini_batch_size // self.config.ppo_micro_batch_size_per_gpu > 0, \
f'normalized ppo_mini_batch_size {self.config.ppo_mini_batch_size} should be larger than ppo_micro_batch_size_per_gpu {self.config.ppo_micro_batch_size_per_gpu}'
def _build_critic_model_optimizer(self, config):
# the following line is necessary
from verl.utils.model import LambdaLayer, print_model_size, squeeze
from verl.utils.torch_dtypes import PrecisionType
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy, MixedPrecision
from torch import optim
local_path = copy_to_local(config.model.path)
# note that the actor and critic tokenizers may differ, so we override the tokenizer info with the actor's
# the critic can be a randomly initialized model of any architecture; it need not match the actor
tokenizer_path = copy_to_local(config.model.tokenizer_path)
self.tokenizer = hf_tokenizer(tokenizer_path, trust_remote_code=config.model.get('trust_remote_code', False))
self.processor = hf_processor(tokenizer_path, trust_remote_code=config.model.get('trust_remote_code', False))
from omegaconf import OmegaConf
override_config = OmegaConf.to_container(self.config.model.get('override_config', OmegaConf.create()))
override_config_kwargs = {
'bos_token_id': self.tokenizer.bos_token_id,
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
}
override_config_kwargs.update(override_config)
if self.rank == 0:
print(f'Critic overriding config {override_config_kwargs}')
torch_dtype = self.config.model.fsdp_config.get('model_dtype', 'fp32')
torch_dtype = PrecisionType.to_dtype(torch_dtype)
from transformers import AutoConfig, AutoModelForTokenClassification
from torch import nn
trust_remote_code = False
critic_model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code)
critic_model_config.num_labels = 1
use_remove_padding = config.model.get('use_remove_padding', False)
if use_remove_padding:
from verl.models.registry import check_model_support_rmpad
check_model_support_rmpad(critic_model_config.model_type)
if use_remove_padding and self.ulysses_sequence_parallel_size > 1:
from verl.models.transformers.monkey_patch import apply_monkey_patch
apply_monkey_patch(critic_model_config, verbose=True)
init_context = get_init_weight_context_manager()
with init_context(), warnings.catch_warnings():
warnings.simplefilter("ignore")
setattr(critic_model_config, 'classifier_dropout', 0.)
setattr(critic_model_config, 'hidden_dropout', '0')
critic_module = AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path=local_path,
torch_dtype=torch_dtype,
config=critic_model_config,
attn_implementation='flash_attention_2',
trust_remote_code=trust_remote_code)
# some parameters may not be in torch_dtype
critic_module.to(torch_dtype)
if config.model.get('enable_gradient_checkpointing', False):
critic_module.gradient_checkpointing_enable(gradient_checkpointing_kwargs={'use_reentrant': False})
if self.rank == 0:
print_model_size(critic_module)
self.critic_model_config = critic_model_config
fsdp_config = self.config.model.fsdp_config
mixed_precision_config = fsdp_config.get('mixed_precision', None)
if mixed_precision_config is not None:
param_dtype = PrecisionType.to_dtype(mixed_precision_config.get('param_dtype', 'bf16'))
reduce_dtype = PrecisionType.to_dtype(mixed_precision_config.get('reduce_dtype', 'fp32'))
buffer_dtype = PrecisionType.to_dtype(mixed_precision_config.get('buffer_dtype', 'fp32'))
else:
param_dtype = torch.bfloat16
reduce_dtype = torch.float32
buffer_dtype = torch.float32
mixed_precision = MixedPrecision(param_dtype=param_dtype, reduce_dtype=reduce_dtype, buffer_dtype=buffer_dtype)
auto_wrap_policy = get_fsdp_wrap_policy(module=critic_module, config=self.config.model.fsdp_config.wrap_policy)
log_gpu_memory_usage('Before critic FSDP', logger=None)
fsdp_mesh = self.device_mesh
sharding_strategy = get_sharding_strategy(fsdp_mesh)
# Note: We force-disable CPUOffload for the critic because it causes incorrect results with gradient accumulation
critic_module = FSDP(critic_module,
param_init_fn=init_fn,
use_orig_params=False,
auto_wrap_policy=auto_wrap_policy,
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy,
mixed_precision=mixed_precision,
sync_module_states=True,
forward_prefetch=False,
device_mesh=self.device_mesh,
cpu_offload=None)
log_gpu_memory_usage('After critic FSDP', logger=None)
critic_optimizer = optim.AdamW(critic_module.parameters(),
lr=config.optim.lr,
betas=config.optim.get('betas', (0.9, 0.999)),
weight_decay=config.optim.get('weight_decay', 1e-2))
total_steps = config.optim.get('total_training_steps', 0)
num_warmup_steps_ratio = config.optim.get('lr_warmup_steps_ratio', 0.)
num_warmup_steps = int(num_warmup_steps_ratio * total_steps)
print(f'Total steps: {total_steps}, num_warmup_steps: {num_warmup_steps}')
from verl.utils.torch_functional import get_constant_schedule_with_warmup
critic_lr_scheduler = get_constant_schedule_with_warmup(optimizer=critic_optimizer,
num_warmup_steps=num_warmup_steps)
return critic_module, critic_optimizer, critic_lr_scheduler
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
# This is used to import external_lib into the huggingface systems
import_external_libs(self.config.model.get('external_lib', None))
from verl.workers.critic import DataParallelPPOCritic
self.critic_module, self.critic_optimizer, self.critic_lr_scheduler = self._build_critic_model_optimizer(
self.config)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.critic_module)
if self._is_offload_optimizer:
offload_fsdp_optimizer(optimizer=self.critic_optimizer)
self.critic = DataParallelPPOCritic(config=self.config,
critic_module=self.critic_module,
critic_optimizer=self.critic_optimizer)
self.flops_counter = FlopsCounter(self.critic_model_config)
self.checkpoint_manager = FSDPCheckpointManager(
model=self.critic_module,
optimizer=self.critic_optimizer,
lr_scheduler=self.critic_lr_scheduler,
processing_class=self.processor if self.processor is not None else self.tokenizer)
torch.cuda.empty_cache()
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def compute_values(self, data: DataProto):
data = data.to('cuda')
if self._is_offload_param:
load_fsdp_model_to_gpu(self.critic_module)
micro_batch_size = self.config.forward_micro_batch_size_per_gpu
data.meta_info['micro_batch_size'] = micro_batch_size
data.meta_info['max_token_len'] = self.config.forward_max_token_len_per_gpu
data.meta_info['use_dynamic_bsz'] = self.config.use_dynamic_bsz
# perform forward computation
with self.ulysses_sharding_manager:
data = self.ulysses_sharding_manager.preprocess_data(data=data)
values = self.critic.compute_values(data=data)
output = DataProto.from_dict(tensors={'values': values})
output = self.ulysses_sharding_manager.postprocess_data(data=output)
output = output.to('cpu')
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.critic_module)
return output
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def update_critic(self, data: DataProto):
data = data.to('cuda')
if self._is_offload_param:
load_fsdp_model_to_gpu(self.critic_module)
if self._is_offload_optimizer:
load_fsdp_optimizer(optimizer=self.critic_optimizer, device_id=torch.cuda.current_device())
# perform forward computation
with self.ulysses_sharding_manager:
data = self.ulysses_sharding_manager.preprocess_data(data=data)
with Timer(name='update_critic', logger=None) as timer:
metrics = self.critic.update_critic(data=data)
delta_time = timer.last
global_num_tokens = data.meta_info['global_token_num']
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
metrics['mfu/critic'] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size
self.critic_lr_scheduler.step()
lr = self.critic_lr_scheduler.get_last_lr()[0]
metrics['critic/lr'] = lr
output = DataProto(batch=None, meta_info={'metrics': metrics})
output = self.ulysses_sharding_manager.postprocess_data(data=output)
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.critic_module)
if self._is_offload_optimizer:
offload_fsdp_optimizer(optimizer=self.critic_optimizer)
torch.cuda.empty_cache()
output = output.to('cpu')
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, local_path, hdfs_path=None, global_step=0, remove_previous_ckpt=False):
import torch
if self._is_offload_param:
load_fsdp_model_to_gpu(self.critic_module)
self.checkpoint_manager.save_checkpoint(local_path=local_path,
hdfs_path=hdfs_path,
global_step=global_step,
remove_previous_ckpt=remove_previous_ckpt)
torch.distributed.barrier()
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.critic_module)
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, path, del_local_after_load=True):
import torch
if self._is_offload_param:
load_fsdp_model_to_gpu(self.critic_module)
self.checkpoint_manager.load_checkpoint(path=path, del_local_after_load=del_local_after_load)
torch.distributed.barrier()
if self._is_offload_param:
offload_fsdp_model_to_cpu(self.critic_module)
if self._is_offload_optimizer:
offload_fsdp_optimizer(self.critic_optimizer)
# TODO(sgm): we may need to extract it to dp_reward_model.py
class RewardModelWorker(Worker):
"""
Note that we only implement reward models that are subclasses of AutoModelForTokenClassification.
"""
def __init__(self, config):
super().__init__()
import torch.distributed
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl")
self.config = config
# build device mesh for Ulysses Sequence Parallel
world_size = torch.distributed.get_world_size()
from torch.distributed.device_mesh import init_device_mesh
fsdp_size = self.config.model.fsdp_config.fsdp_size
self.device_mesh = create_device_mesh(world_size=world_size, fsdp_size=fsdp_size)
self.ulysses_device_mesh = None
self.ulysses_sequence_parallel_size = self.config.get('ulysses_sequence_parallel_size', 1)
dp = world_size // self.ulysses_sequence_parallel_size
if self.ulysses_sequence_parallel_size > 1:
self.ulysses_device_mesh = init_device_mesh('cuda',
mesh_shape=(dp, self.ulysses_sequence_parallel_size),
mesh_dim_names=['dp', 'sp'])
self.ulysses_sharding_manager = FSDPUlyssesShardingManager(self.ulysses_device_mesh)
self.use_remove_padding = self.config.model.get('use_remove_padding', False)
# normalize config
if self.config.micro_batch_size is not None:
self.config.micro_batch_size //= torch.distributed.get_world_size()
self.config.micro_batch_size_per_gpu = self.config.micro_batch_size
def _build_model(self, config):
# the following line is necessary
from transformers import AutoModelForTokenClassification, AutoConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy, CPUOffload
# download the checkpoint from hdfs
local_path = copy_to_local(config.model.path)
if self.config.model.input_tokenizer is None:
self._do_switch_chat_template = False
else:
self._do_switch_chat_template = True
input_tokenizer_local_path = copy_to_local(config.model.input_tokenizer)
self.input_tokenizer = hf_tokenizer(input_tokenizer_local_path,
trust_remote_code=config.model.get('trust_remote_code', False))
self.tokenizer = hf_tokenizer(local_path, trust_remote_code=config.model.get('trust_remote_code', False))
trust_remote_code = config.model.get('trust_remote_code', False)
model_config = AutoConfig.from_pretrained(local_path, trust_remote_code=trust_remote_code)
model_config.num_labels = 1
use_remove_padding = config.model.get('use_remove_padding', False)
if use_remove_padding:
from verl.models.registry import check_model_support_rmpad
check_model_support_rmpad(model_config.model_type)
if use_remove_padding and self.ulysses_sequence_parallel_size > 1:
from verl.models.transformers.monkey_patch import apply_monkey_patch
apply_monkey_patch(model_config, verbose=True)
# unlike the actor, the reward model is inference-only (no optimizer states), so it is created directly in bf16
init_context = get_init_weight_context_manager(use_meta_tensor=not model_config.tie_word_embeddings)
with init_context(), warnings.catch_warnings():
warnings.simplefilter("ignore")
setattr(model_config, 'classifier_dropout', 0.)
reward_module = AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path=local_path,
config=model_config,
torch_dtype=torch.bfloat16,
attn_implementation='flash_attention_2',
trust_remote_code=trust_remote_code)
reward_module.to(torch.bfloat16)
auto_wrap_policy = get_fsdp_wrap_policy(module=reward_module, config=self.config.model.fsdp_config)
fsdp_mesh = self.device_mesh
sharding_strategy = get_sharding_strategy(fsdp_mesh)
reward_module = FSDP(
reward_module,
param_init_fn=init_fn,
use_orig_params=False,
auto_wrap_policy=auto_wrap_policy,
device_id=torch.cuda.current_device(),
sharding_strategy=sharding_strategy, # zero3
sync_module_states=True,
cpu_offload=CPUOffload(offload_params=True),
forward_prefetch=False,
device_mesh=self.device_mesh)
return reward_module
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
# This is used to import external_lib into the huggingface systems
import_external_libs(self.config.model.get('external_lib', None))
self.reward_module = self._build_model(config=self.config)
torch.cuda.empty_cache()
def _forward_micro_batch(self, micro_batch):
from flash_attn.bert_padding import pad_input, unpad_input, index_first_axis, rearrange
from verl.utils.ulysses import ulysses_pad_and_slice_inputs, gather_outpus_and_unpad
with torch.no_grad(), torch.autocast(device_type='cuda', dtype=torch.bfloat16):
input_ids = micro_batch['input_ids']
batch_size, seqlen = input_ids.shape
attention_mask = micro_batch['attention_mask']
position_ids = micro_batch['position_ids']
if self.use_remove_padding:
input_ids_rmpad, indices, *_ = unpad_input(input_ids.unsqueeze(-1),
attention_mask) # input_ids_rmpad (total_nnz, ...)
input_ids_rmpad = input_ids_rmpad.transpose(0, 1) # (1, total_nnz)
# unpad position_ids so the rotary embeddings stay aligned
position_ids_rmpad = index_first_axis(rearrange(position_ids.unsqueeze(-1), "b s ... -> (b s) ..."),
indices).transpose(0, 1)
# pad and slice the inputs if sp > 1
if self.ulysses_sequence_parallel_size > 1:
input_ids_rmpad, position_ids_rmpad, pad_size = ulysses_pad_and_slice_inputs(input_ids_rmpad, \
position_ids_rmpad, \
sp_size=self.ulysses_sequence_parallel_size)
# only pass input_ids and position_ids to enable flash_attn_varlen
output = self.reward_module(input_ids=input_ids_rmpad,
attention_mask=None,
position_ids=position_ids_rmpad,
use_cache=False) # prevent the model from thinking we are generating
reward_rmpad = output.logits
reward_rmpad = reward_rmpad.squeeze(0) # (total_nnz)
# gather output if sp > 1
if self.ulysses_sequence_parallel_size > 1:
reward_rmpad = gather_outpus_and_unpad(reward_rmpad,
gather_dim=0,
unpad_dim=0,
padding_size=pad_size)
# pad it back
rm_score = pad_input(reward_rmpad, indices=indices, batch=batch_size, seqlen=seqlen).squeeze(-1)
else:
output = self.reward_module(input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids)
rm_score = output.logits # (batch_size, seq_len, 1)
rm_score = rm_score.squeeze(-1)
# extract the result of the last valid token
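# position_ids * attention_mask is maximal at the last non-padded token, so argmax returns its index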
eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,)
rm_score = rm_score[torch.arange(batch_size), eos_mask_idx]
return rm_score
def _expand_to_token_level(self, data: DataProto, scores: torch.Tensor):
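# scatter each scalar score to the last valid token of its sequence, then keep only the response segment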
batch_size = data.batch.batch_size[0]
# expand as token_level_reward
attention_mask = data.batch['attention_mask']
position_ids = data.batch['position_ids']
response_length = data.batch['responses'].shape[-1]
eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bsz,)
token_level_scores = torch.zeros_like(attention_mask, dtype=scores.dtype) # (bsz, seqlen)
token_level_scores[torch.arange(batch_size), eos_mask_idx] = scores
# select the response part
token_level_scores = token_level_scores[:, -response_length:]
return token_level_scores
def _switch_chat_template(self, data: DataProto):
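# decode responses with the source (actor) tokenizer, re-apply the chat template, and re-tokenize with the reward model tokenizer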
src_max_length = data.batch['attention_mask'].shape[-1]
src_tokenizer = self.input_tokenizer
target_tokenizer = self.tokenizer
rm_input_ids = []
rm_attention_mask = []
for i in range(data.batch.batch_size[0]):
# extract raw prompt
chat: list = data.non_tensor_batch['raw_prompt'][i].tolist()
# extract response
response_ids = data.batch['responses'][i]
response_length = response_ids.shape[-1]
valid_response_length = data.batch['attention_mask'][i][-response_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
# decode
response = src_tokenizer.decode(valid_response_ids)
# strip the eos token from the decoded response
response = response.replace(src_tokenizer.eos_token, '')
chat.append({'role': 'assistant', 'content': response})
prompt_with_chat_template = target_tokenizer.apply_chat_template(chat,
add_generation_prompt=False,
tokenize=False)
if self.rank == 0 and i == 0:
# for debugging purpose
print(f'Switch template. chat: {prompt_with_chat_template}')
# the maximum length is actually determined by the reward model itself
max_length = self.config.get('max_length', src_max_length)
if max_length is None:
max_length = src_max_length
input_ids, attention_mask = verl_F.tokenize_and_postprocess_data(
prompt=prompt_with_chat_template,
tokenizer=target_tokenizer,
max_length=max_length,
pad_token_id=target_tokenizer.pad_token_id,
left_pad=False, # right padding
truncation=self.config.get('truncation', 'right')) # truncate from the right
rm_input_ids.append(input_ids)
rm_attention_mask.append(attention_mask)
rm_input_ids = torch.cat(rm_input_ids, dim=0)
rm_attention_mask = torch.cat(rm_attention_mask, dim=0)
rm_position_ids = compute_position_id_with_mask(rm_attention_mask)
rm_inputs = {'input_ids': rm_input_ids, 'attention_mask': rm_attention_mask, 'position_ids': rm_position_ids}
return DataProto.from_dict(rm_inputs)
@register(dispatch_mode=Dispatch.DP_COMPUTE_PROTO)
def compute_rm_score(self, data: DataProto):
import itertools
from verl.utils.seqlen_balancing import rearrange_micro_batches, get_reverse_idx
data = data.to('cuda')
if self._do_switch_chat_template:
rm_data = self._switch_chat_template(data)
rm_data.batch = rm_data.batch.cuda()
# perform forward computation
with self.ulysses_sharding_manager:
rm_data = self.ulysses_sharding_manager.preprocess_data(data=rm_data)
data = self.ulysses_sharding_manager.preprocess_data(data=data)
use_dynamic_bsz = self.config.use_dynamic_bsz
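# dynamic bsz packs sequences into token-balanced micro-batches; `indices` records the permutation so scores can later be restored to input order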
if use_dynamic_bsz:
max_token_len = self.config.forward_max_token_len_per_gpu * self.ulysses_sequence_parallel_size
micro_batches, indices = rearrange_micro_batches(batch=rm_data.batch, max_token_len=max_token_len)
else:
micro_batches = rm_data.batch.split(self.config.micro_batch_size_per_gpu)
output = []
for micro_batch in micro_batches:
rm_score = self._forward_micro_batch(micro_batch)
output.append(rm_score)
scores = torch.cat(output, dim=0) # (batch_size)
if use_dynamic_bsz:
indices = list(itertools.chain.from_iterable(indices))
assert len(indices) == scores.size(0), f"{len(indices)} vs. {scores.size()}"
revert_indices = torch.tensor(get_reverse_idx(indices), dtype=torch.long)
scores = scores[revert_indices]
token_level_scores = self._expand_to_token_level(data, scores)
# Note that this is only the scores, may not be the final rewards used to train RL
output = DataProto.from_dict(tensors={'rm_scores': token_level_scores})
output = self.ulysses_sharding_manager.postprocess_data(data=output)
# https://pytorch.org/docs/stable/notes/fsdp.html#fsdp-notes
# unshard the root FSDP module
self.reward_module._handle.reshard(True)
output = output.to('cpu')
torch.cuda.empty_cache()
return output
================================================
FILE: verl/workers/megatron_workers.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Megatron-based workers for running the PPO algorithm
"""
import os
import logging
import ray
import torch
import torch.distributed
import torch.nn as nn
from omegaconf import DictConfig
from verl.single_controller.base.megatron.worker import MegatronWorker
from verl.workers.actor.megatron_actor import MegatronPPOActor
from verl.workers.critic.megatron_critic import MegatronPPOCritic
from verl.workers.sharding_manager import AllGatherPPModel
from verl.workers.reward_model.megatron.reward_model import MegatronRewardModel
from verl.single_controller.base.decorator import register, Dispatch
from verl import DataProto
from verl.utils.fs import copy_to_local
from verl.utils.debug import log_gpu_memory_usage
from verl.utils.model import load_megatron_model_weights
from verl.utils.flops_counter import FlopsCounter
from verl.utils.megatron_utils import init_model_parallel_config
from verl.utils.megatron_utils import offload_megatron_param_and_grad, load_megatron_param_and_grad
from verl.utils import hf_tokenizer
from codetiming import Timer
from megatron.core import parallel_state as mpu
from megatron.core import ModelParallelConfig
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv('VERL_PPO_LOGGING_LEVEL', 'WARN'))
def set_random_seed(seed):
import torch
import numpy as np
import random
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
if torch.cuda.device_count() > 0:
from megatron.core import tensor_parallel
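# megatron maintains distinct cuda rng states across tensor-parallel ranks (e.g. for dropout)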
tensor_parallel.model_parallel_cuda_manual_seed(seed)
# FIXME: torch cumsum not support deterministic (used in vllm sampler),
# https://github.com/pytorch/pytorch/issues/89492
# torch.use_deterministic_algorithms(True, warn_only=True)
# os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
class ActorRolloutRefWorker(MegatronWorker):
"""
This worker can be instantiated as a standalone actor or a standalone rollout or a standalone reference policy
or a hybrid engine based on the config.rollout
"""
def __init__(self, config: DictConfig, role: str):
super().__init__()
self.config = config
# NOTE(sgm): We utilize the colocated WorkerGroup by default.
# As a result, Workers for different models share the same process.
# Therefore, we only require one distributed initialization.
# To utilize different parallel strategies for different models:
# 1. users should disable WorkerDict; 2. assign different ResourcePools to different models;
# 3. and apply the following patch in ray==2.10: https://github.com/ray-project/ray/pull/44385
if not torch.distributed.is_initialized():
rank = int(os.environ['LOCAL_RANK'])
torch.distributed.init_process_group(backend="nccl")
torch.cuda.set_device(rank)
if self.config.actor.megatron.sequence_parallel:
os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
mpu.initialize_model_parallel(
tensor_model_parallel_size=self.config.actor.megatron.tensor_model_parallel_size,
pipeline_model_parallel_size=self.config.actor.megatron.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
use_sharp=False,
context_parallel_size=1,
expert_model_parallel_size=1,
nccl_communicator_config_path=None,
)
set_random_seed(seed=self.config.actor.megatron.seed)
self.role = role
assert self.role in ['actor', 'rollout', 'ref', 'actor_rollout', 'actor_rollout_ref']
self._is_actor = self.role in ['actor', 'actor_rollout', 'actor_rollout_ref']
self._is_rollout = self.role in ['rollout', 'actor_rollout', 'actor_rollout_ref']
self._is_ref = self.role in ['ref', 'actor_rollout_ref']
# TODO(sgm): Currently, we only support reference model param offload
# will support other offload later
self._is_offload_param = False
self._is_offload_grad = False
self._is_offload_optimizer = False
# normalize config
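# convert global batch sizes into per-data-parallel-rank sizes; the mini batch is first scaled by rollout.n since each prompt yields n samples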
if self._is_actor and self._is_rollout:
self.config.actor.ppo_mini_batch_size *= self.config.rollout.n
self.config.actor.ppo_mini_batch_size //= mpu.get_data_parallel_world_size()
if self.config.actor.get('ppo_micro_batch_size', None):
self.config.actor.ppo_micro_batch_size //= mpu.get_data_parallel_world_size()
self.config.rollout.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size()
self.config.actor.ppo_micro_batch_size_per_gpu = self.config.actor.ppo_micro_batch_size
self.config.rollout.log_prob_micro_batch_size_per_gpu = self.config.rollout.log_prob_micro_batch_size
self._is_offload_param = self.config.actor.get('param_offload', False)
self._is_offload_grad = self.config.actor.get('grad_offload', False)
self._is_offload_optimizer = self.config.actor.get('optimizer_offload', False)
elif self._is_ref:
if self.config.ref.get('ppo_micro_batch_size', None):
self.config.ref.log_prob_micro_batch_size //= mpu.get_data_parallel_world_size()
self.config.ref.ppo_micro_batch_size_per_gpu = self.config.ref.ppo_micro_batch_size
self._is_offload_param = self.config.ref.get('param_offload', False)
def _build_model_optimizer(self,
model_path,
megatron_config: ModelParallelConfig,
optim_config,
override_model_config,
enable_gradient_checkpointing=False):
from verl.utils.megatron.optimizer import get_megatron_optimizer
from megatron.core.models.gpt.gpt_model import ModelType
from verl.utils.model import print_model_size, update_model_config, get_generation_config
from verl.utils.megatron_utils import get_model, init_megatron_optim_config
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, GenerationConfig
# Step 1: initialize the tokenizer
local_path = copy_to_local(model_path)
self.tokenizer = hf_tokenizer(local_path)
# Step 2: get the actor_model_config
actor_model_config = AutoConfig.from_pretrained(local_path)
self.generation_config = get_generation_config(local_path)
override_config_kwargs = {
'bos_token_id': self.tokenizer.bos_token_id,
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
}
override_config_kwargs.update(override_model_config)
update_model_config(actor_model_config, override_config_kwargs=override_config_kwargs)
if self.rank == 0:
print(f'Model config after override: {actor_model_config}')
def megatron_actor_model_provider(pre_process, post_process):
from verl.utils.model import get_parallel_model_from_config
# vpp is not supported yet because it hangs for some reason; needs debugging
vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() # this will be set inside get_model
# this_megatron_config = copy.deepcopy(megatron_config)
# this_megatron_config.virtual_pipeline_model_parallel_rank = vpp_rank
share_embeddings_and_output_weights = getattr(actor_model_config, "tie_word_embeddings", False)
parallel_model = get_parallel_model_from_config(
config=actor_model_config,
megatron_config=megatron_config,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=share_embeddings_and_output_weights,
value=False)
parallel_model.cuda()
return parallel_model
# Step 3: initialize the megatron model
if self._is_actor and self._is_rollout:
# Initialize the 3D HybridEngine
hybrid_engine = AllGatherPPModel(model_provider=megatron_actor_model_provider)
# Fetch the model at current rank
actor_module = hybrid_engine.this_rank_models
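# vpp is not used here, so only the first model chunk of the ModuleList is kept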
if isinstance(actor_module, nn.ModuleList):
actor_module = [actor_module[0]]
if self.config.actor.load_weight:
load_megatron_model_weights(self.config,
actor_model_config,
actor_module,
params_dtype=megatron_config.params_dtype,
is_value_model=False)
if self.rank == 0:
print_model_size(actor_module[0])
log_gpu_memory_usage('After AllGatherPPModel init', logger=logger)
elif self._is_ref:
print(f'self.config.ref.load_weight: {self.config.ref.load_weight}')
ref_module = get_model(model_provider_func=megatron_actor_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=False)
# ref_module = nn.ModuleList(ref_module)
if self.config.ref.load_weight: # should align with the actor:
assert self.config.actor.load_weight == self.config.ref.load_weight
print('load ref weight start')
load_megatron_model_weights(self.config,
actor_model_config,
ref_module,
params_dtype=megatron_config.params_dtype,
is_value_model=False)
log_gpu_memory_usage('After ref module init', logger=logger)
return ref_module, actor_model_config
# TODO: add more optimizer args into config
if self._is_actor:
optim_config = init_megatron_optim_config(optim_config)
actor_optimizer = get_megatron_optimizer(model=actor_module, config=optim_config)
else:
optim_config = None
actor_optimizer = None
log_gpu_memory_usage('After actor optimizer init', logger=logger)
return actor_module, hybrid_engine, actor_optimizer, actor_model_config, optim_config
def _build_rollout(self):
if self.config.rollout.name == 'vllm':
from verl.workers.rollout.vllm_rollout import vLLMRollout, vllm_mode
from verl.workers.sharding_manager import MegatronVLLMShardingManager
from verl.utils.model import normalize_pp_vpp_params
# NOTE(sgm): If the QKV and gate_up projection layers are concatenated together in the actor,
# we will reorganize their weight format when resharding from actor to rollout.
layer_name_mapping = {
"qkv_layer_name":
self.config.rollout.layer_name_map.get("qkv_layer_name", "qkv"),
"gate_proj_layer_name":
self.config.rollout.layer_name_map.get("gate_proj_layer_name", "linear_fc1.weight"),
}
# reshard the weight partition from actor to rollout to initialize the rollout class
# create a new cuda space for parameters not in this pp rank
self.hybrid_engine.load_params_to_cuda()
# broadcast the parameters from pp rank to other ranks
self.hybrid_engine.allgather_params()
# obtain name to parameters in pp/vpp
params = self.hybrid_engine.get_all_params()
# update the param names from per-pp/vpp partition naming to global layer naming for the rollout
params = normalize_pp_vpp_params(params=params,
num_hidden_layers=self.actor_model_config.num_hidden_layers,
layer_name='layers')
assert vllm_mode == 'customized', "Support for vllm>=0.7 for Megatron-LM backend has not been implemented yet."
rollout = vLLMRollout(actor_module=params,
config=self.config.rollout,
tokenizer=self.tokenizer,
model_hf_config=self.actor_model_config,
train_tp=mpu.get_tensor_model_parallel_world_size())
log_gpu_memory_usage('After building vllm rollout', logger=logger)
# perform weight resharding between actor and rollout
sharding_manager = MegatronVLLMShardingManager(module=self.hybrid_engine,
inference_engine=rollout.inference_engine,
model_config=self.actor_model_config,
layer_name_mapping=layer_name_mapping)
log_gpu_memory_usage('After building sharding manager', logger=logger)
else:
raise NotImplementedError('Only vllmRollout is supported with Megatron now')
return rollout, sharding_manager
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
if self.config.model.get('external_lib', None) is not None:
# This is used to import external_lib into the huggingface systems
import importlib
importlib.import_module(self.config.model.external_lib)
from omegaconf import OmegaConf
from verl.utils.torch_dtypes import PrecisionType
override_model_config = OmegaConf.to_container(self.config.model.get('override_config', OmegaConf.create()))
torch_dtype = torch.bfloat16
megatron_config = OmegaConf.create({
'sequence_parallel': self.config.actor.megatron.get('sequence_parallel', True),
'param_dtype': PrecisionType.to_str(torch_dtype),
'tensor_model_parallel_size': mpu.get_tensor_model_parallel_world_size(),
'pipeline_model_parallel_rank': mpu.get_pipeline_model_parallel_rank(),
'pipeline_model_parallel_size': mpu.get_pipeline_model_parallel_world_size(),
'virtual_pipeline_model_parallel_rank': mpu.get_virtual_pipeline_model_parallel_rank(),
'virtual_pipeline_model_parallel_size': mpu.get_virtual_pipeline_model_parallel_world_size()
})
megatron_config = init_model_parallel_config(megatron_config)
if self._is_actor or self._is_rollout:
# we need the model for actor and rollout
if self._is_actor:
optim_config = self.config.actor.optim
else:
optim_config = None
self.actor_module, self.hybrid_engine, self.actor_optimizer, \
self.actor_model_config, self.actor_optim_config = self._build_model_optimizer(
model_path=self.config.model.path,
megatron_config=megatron_config,
optim_config=optim_config,
override_model_config=override_model_config,
)
if self._is_actor:
self.actor = MegatronPPOActor(config=self.config.actor,
model_config=self.actor_model_config,
megatron_config=megatron_config,
actor_module=self.actor_module,
actor_optimizer=self.actor_optimizer,
actor_optimizer_config=self.actor_optim_config)
if self._is_rollout:
self.rollout, self.sharding_manager = self._build_rollout()
if self._is_ref:
self.ref_module, self.ref_model_config = self._build_model_optimizer(
model_path=self.config.model.path,
megatron_config=megatron_config,
optim_config=None,
override_model_config=override_model_config,
)
self.ref_policy = MegatronPPOActor(config=self.config.ref,
model_config=self.ref_model_config,
megatron_config=megatron_config,
actor_module=self.ref_module,
actor_optimizer=None,
actor_optimizer_config=None)
if self._is_actor:
self.flops_counter = FlopsCounter(self.actor_model_config)
torch.cuda.empty_cache()
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def update_actor(self, data: DataProto):
assert self._is_actor
data.batch = data.batch.cuda()
log_gpu_memory_usage('Before update policy', logger=logger)
dataloader = self.actor.make_minibatch_iterator(data=data)
with Timer(name='update_policy', logger=None) as timer:
metrics = self.actor.update_policy(dataloader=dataloader)
delta_time = timer.last
global_num_tokens = data.meta_info['global_token_num']
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
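# model FLOPs utilization: achieved FLOPs (scaled by the number of ppo epochs) over the hardware peak, averaged over workers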
metrics['mfu/actor'] = estimated_flops * self.config.actor.ppo_epochs / promised_flops / self.world_size
log_gpu_memory_usage('After update policy', logger=logger)
# TODO: here, we should return all metrics
output = DataProto(meta_info={'metrics': metrics})
output = output.to('cpu')
torch.cuda.empty_cache()
return output
@register(dispatch_mode=Dispatch.MEGATRON_PP_AS_DP_PROTO)
def generate_sequences(self, prompts: DataProto):
assert self._is_rollout
prompts.batch = prompts.batch.cuda()
meta_info = {
'eos_token_id':
self.generation_config.eos_token_id
if self.generation_config is not None else self.tokenizer.eos_token_id,
'pad_token_id':
self.generation_config.pad_token_id
if self.generation_config is not None else self.tokenizer.pad_token_id,
}
prompts.meta_info.update(meta_info)
with self.sharding_manager:
log_gpu_memory_usage('After entering sharding manager', logger=logger)
prompts = self.sharding_manager.preprocess_data(prompts)
output = self.rollout.generate_sequences(prompts=prompts)
log_gpu_memory_usage('After rollout generation', logger=logger)
output = self.sharding_manager.postprocess_data(output)
output = output.to('cpu')
# clear kv cache
torch.cuda.empty_cache()
log_gpu_memory_usage('After recompute log prob', logger=logger)
return output
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def compute_ref_log_prob(self, data: DataProto):
data = data.to('cuda')
assert self._is_ref
if self._is_offload_param:
load_megatron_param_and_grad(self.ref_module, torch.cuda.current_device(), self._is_offload_grad)
micro_batch_size = self.config.rollout.log_prob_micro_batch_size_per_gpu
data.meta_info['micro_batch_size'] = micro_batch_size
data.meta_info['temperature'] = self.config.rollout.temperature
output = self.ref_policy.compute_log_prob(data=data)
output = DataProto.from_dict(tensors={'ref_log_prob': output})
output = output.to('cpu')
if self._is_offload_param:
offload_megatron_param_and_grad(self.ref_module, self._is_offload_grad)
torch.cuda.empty_cache()
return output
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def compute_log_prob(self, data: DataProto):
assert self._is_actor
data = data.to('cuda')
output = data
# we should always recompute old_log_probs when using the HybridEngine
output.meta_info['micro_batch_size'] = self.config.rollout.log_prob_micro_batch_size_per_gpu
output.meta_info['temperature'] = self.config.rollout.temperature
old_log_probs = self.actor.compute_log_prob(data=output)
output.batch['old_log_probs'] = old_log_probs
output = output.to('cpu')
# clear kv cache
torch.cuda.empty_cache()
log_gpu_memory_usage('After recompute log prob', logger=logger)
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, checkpoint_path, **kwargs):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_pretrained_model(self, checkpoint_path, **kwargs):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, checkpoint_path, **kwargs):
assert self._is_actor
pass
class CriticWorker(MegatronWorker):
def __init__(self, config):
super().__init__()
self.config = config
# NOTE(sgm): We utilize the colocated WorkerGroup by default.
# As a result, Workers for different models share the same process.
# Therefore, we only require one distributed initialization.
# To utilize different parallel strategies for different models:
# 1. users should disable WorkerDict; 2. assign different ResourcePools to different models;
# 3. and apply the following patch in ray==2.10: https://github.com/ray-project/ray/pull/44385
if not torch.distributed.is_initialized():
rank = int(os.environ['LOCAL_RANK'])
torch.distributed.init_process_group(backend="nccl")
torch.cuda.set_device(rank)
if self.config.megatron.sequence_parallel:
os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
mpu.initialize_model_parallel(
tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size,
pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
use_sharp=False,
context_parallel_size=1,
expert_model_parallel_size=1,
nccl_communicator_config_path=None,
)
set_random_seed(seed=self.config.megatron.seed)
# normalize config
self.config.ppo_mini_batch_size //= mpu.get_data_parallel_world_size()
if self.config.get('ppo_micro_batch_size', None):
self.config.ppo_micro_batch_size //= mpu.get_data_parallel_world_size()
self.config.ppo_micro_batch_size_per_gpu = self.config.ppo_micro_batch_size
# TODO(sgm): support critic model offload
def _build_critic_model_optimizer(self,
model_path,
megatron_config: ModelParallelConfig,
optim_config,
override_model_config,
enable_gradient_checkpointing=False):
from megatron.core.models.gpt.gpt_model import ModelType
from verl.utils.model import print_model_size, update_model_config
from verl.utils.megatron.optimizer import get_megatron_optimizer
from verl.utils.megatron_utils import get_model, init_megatron_optim_config, init_model_parallel_config
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
# Step 1: initialize the tokenizer
local_path = copy_to_local(model_path)
self.tokenizer = hf_tokenizer(local_path)
# Step 2: get the actor_model_config
critic_model_config = AutoConfig.from_pretrained(local_path)
override_config_kwargs = {
'bos_token_id': self.tokenizer.bos_token_id,
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
}
override_config_kwargs.update(override_model_config)
update_model_config(critic_model_config, override_config_kwargs=override_config_kwargs)
if self.rank == 0:
print(f'Model config after override: {critic_model_config}')
def megatron_critic_model_provider(pre_process, post_process):
from verl.utils.model import get_parallel_model_from_config
# TODO: support vpp here
# vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() # this will be set inside get_model
# this_megatron_config = copy.deepcopy(megatron_config)
# this_megatron_config.virtual_pipeline_model_parallel_rank = vpp_rank
parallel_model = get_parallel_model_from_config(config=critic_model_config,
megatron_config=megatron_config,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=False,
value=True)
parallel_model.cuda()
return parallel_model
# Step 3: initialize the megatron model
critic_module = get_model(model_provider_func=megatron_critic_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=True)
# note that critic_module will be a list to be compatible with the construction of interleaved pp (vpp),
# but we do not use vpp here yet. For simplicity, we skip wrapping it in nn.ModuleList
# critic_module = nn.ModuleList(critic_module)
if self.config.load_weight:
load_megatron_model_weights(self.config,
critic_model_config,
critic_module,
params_dtype=megatron_config.params_dtype,
is_value_model=True)
if self.rank == 0:
print_model_size(critic_module[0])
# TODO: add more optimizer args into config
optim_config = init_megatron_optim_config(optim_config)
critic_optimizer = get_megatron_optimizer(model=critic_module, config=optim_config)
torch.cuda.empty_cache()
return critic_module, critic_optimizer, critic_model_config, optim_config
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
# create critic
from omegaconf import OmegaConf
from verl.utils.torch_dtypes import PrecisionType
if self.config.model.get('external_lib', None) is not None:
# This is used to import external_lib into the huggingface systems
import importlib
importlib.import_module(self.config.model.external_lib)
override_model_config = OmegaConf.to_container(self.config.model.get('override_config', OmegaConf.create()))
torch_dtype = torch.bfloat16
megatron_config = OmegaConf.create({
'sequence_parallel': self.config.megatron.get('sequence_parallel', True),
'param_dtype': PrecisionType.to_str(torch_dtype),
'tensor_model_parallel_size': mpu.get_tensor_model_parallel_world_size(),
'pipeline_model_parallel_rank': mpu.get_pipeline_model_parallel_rank(),
'pipeline_model_parallel_size': mpu.get_pipeline_model_parallel_world_size(),
'virtual_pipeline_model_parallel_rank': mpu.get_virtual_pipeline_model_parallel_rank(),
'virtual_pipeline_model_parallel_size': mpu.get_virtual_pipeline_model_parallel_world_size()
})
megatron_config = init_model_parallel_config(megatron_config)
critic_module, critic_optimizer, critic_model_config, critic_optimizer_config = self._build_critic_model_optimizer(
model_path=self.config.model.path,
megatron_config=megatron_config,
optim_config=self.config.optim,
override_model_config=override_model_config)
self.critic = MegatronPPOCritic(config=self.config,
model_config=critic_model_config,
megatron_config=megatron_config,
critic_module=critic_module,
critic_optimizer=critic_optimizer,
critic_optimizer_config=critic_optimizer_config)
self.flops_counter = FlopsCounter(critic_model_config)
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def compute_values(self, data: DataProto):
data = data.to('cuda')
values = self.critic.compute_values(data=data)
output = DataProto.from_dict(tensors={'values': values})
output = output.to('cpu')
return output
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def update_critic(self, data: DataProto):
data = data.to('cuda')
dataloader = self.critic.make_minibatch_iterator(data)
with Timer(name='update_critic', logger=None) as timer:
metrics = self.critic.update_critic(dataloader=dataloader)
delta_time = timer.last
global_num_tokens = data.meta_info['global_token_num']
estimated_flops, promised_flops = self.flops_counter.estimate_flops(global_num_tokens, delta_time)
metrics['mfu/critic'] = estimated_flops * self.config.ppo_epochs / promised_flops / self.world_size
output = DataProto(batch=None, meta_info={'metrics': metrics})
output = output.to('cpu')
return output
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def load_checkpoint(self, checkpoint_path, **kwargs):
pass
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def save_checkpoint(self, checkpoint_path, **kwargs):
pass
class RewardModelWorker(MegatronWorker):
"""
Note that we only implement the reward model that is a subclass of AutoModelForSequenceClassification.
"""
def __init__(self, config):
super().__init__()
self.config = config
# NOTE(sgm): We utilize the colocated WorkerGroup by default.
# As a result, Workers for different models share the same process.
# Therefore, we only require one distributed initialization.
# To utilize different parallel strategies for different models:
# 1. users should disable WorkerDict; 2. assign different ResourcePools to different models;
# 3. and apply the following patch in ray==2.10: https://github.com/ray-project/ray/pull/44385
if not torch.distributed.is_initialized():
rank = int(os.environ['LOCAL_RANK'])
torch.distributed.init_process_group(backend="nccl")
torch.cuda.set_device(rank)
if self.config.megatron.sequence_parallel:
os.environ['CUDA_DEVICE_MAX_CONNECTIONS'] = '1'
mpu.initialize_model_parallel(
tensor_model_parallel_size=self.config.megatron.tensor_model_parallel_size,
pipeline_model_parallel_size=self.config.megatron.pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
use_sharp=False,
context_parallel_size=1,
expert_model_parallel_size=1,
nccl_communicator_config_path=None,
)
set_random_seed(seed=self.config.megatron.seed)
# normalize config
if self.config.micro_batch_size is not None:
self.config.micro_batch_size //= mpu.get_data_parallel_world_size()
self.config.micro_batch_size_per_gpu = self.config.micro_batch_size
def _build_rm_model(self, model_path, megatron_config: ModelParallelConfig, override_model_config):
from megatron.core.models.gpt.gpt_model import ModelType
from verl.utils.model import print_model_size, update_model_config
from verl.utils.megatron_utils import get_model
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
# Step 1: initialize the tokenizer
local_path = copy_to_local(model_path)
self.tokenizer = hf_tokenizer(local_path)
# Step 2: get the actor_model_config
rm_model_config = AutoConfig.from_pretrained(local_path)
override_config_kwargs = {
'bos_token_id': self.tokenizer.bos_token_id,
'eos_token_id': self.tokenizer.eos_token_id,
'pad_token_id': self.tokenizer.pad_token_id,
}
override_config_kwargs.update(override_model_config)
update_model_config(rm_model_config, override_config_kwargs=override_config_kwargs)
if self.rank == 0:
print(f'Model config after override: {rm_model_config}')
def megatron_rm_model_provider(pre_process, post_process):
from verl.utils.model import get_parallel_model_from_config
# vpp is not supported yet because it hangs for some reason; needs debugging
vpp_rank = mpu.get_virtual_pipeline_model_parallel_rank() # this will be set inside get_model
# this_megatron_config = copy.deepcopy(megatron_config)
# this_megatron_config.virtual_pipeline_model_parallel_rank = vpp_rank
parallel_model = get_parallel_model_from_config(config=rm_model_config,
megatron_config=megatron_config,
pre_process=pre_process,
post_process=post_process,
share_embeddings_and_output_weights=False,
value=True)
parallel_model.cuda()
return parallel_model
# Step 3: initialize the megatron model
reward_model = get_model(model_provider_func=megatron_rm_model_provider,
model_type=ModelType.encoder_or_decoder,
wrap_with_ddp=False)
# note that reward_model will be a list to be compatible with the construction of interleaved pp (vpp),
# but we do not use vpp here yet. For simplicity, we skip wrapping it in nn.ModuleList
# reward_model = nn.ModuleList(reward_model)
if self.config.load_weight:
load_megatron_model_weights(self.config,
rm_model_config,
reward_model,
params_dtype=megatron_config.params_dtype,
is_value_model=True)
# TODO: add more optimizer args into config
torch.cuda.empty_cache()
return reward_model, rm_model_config
@register(dispatch_mode=Dispatch.ONE_TO_ALL)
def init_model(self):
# create the reward model
from omegaconf import OmegaConf
from verl.utils.torch_dtypes import PrecisionType
from transformers import AutoTokenizer
if self.config.model.get('external_lib', None) is not None:
# This is used to import external_lib into the huggingface systems
import importlib
importlib.import_module(self.config.model.external_lib)
override_model_config = OmegaConf.to_container(self.config.model.get('override_config', OmegaConf.create()))
sft_tokenizer_local_path = copy_to_local(self.config.model.input_tokenizer)
sft_tokenizer = hf_tokenizer(sft_tokenizer_local_path)
rm_tokenizer_path = self.config.model.get('rm_tokenizer', None)
rm_tokenizer = None
if rm_tokenizer_path is not None:
rm_tokenizer_local_path = copy_to_local(rm_tokenizer_path)
rm_tokenizer = hf_tokenizer(rm_tokenizer_local_path)
torch_dtype = torch.bfloat16
megatron_config = OmegaConf.create({
'sequence_parallel': self.config.megatron.get('sequence_parallel', True),
'param_dtype': PrecisionType.to_str(torch_dtype),
'tensor_model_parallel_size': mpu.get_tensor_model_parallel_world_size(),
'pipeline_model_parallel_rank': mpu.get_pipeline_model_parallel_rank(),
'pipeline_model_parallel_size': mpu.get_pipeline_model_parallel_world_size(),
'virtual_pipeline_model_parallel_rank': mpu.get_virtual_pipeline_model_parallel_rank(),
'virtual_pipeline_model_parallel_size': mpu.get_virtual_pipeline_model_parallel_world_size()
})
megatron_config = init_model_parallel_config(megatron_config)
reward_model_module, reward_model_config = self._build_rm_model(
model_path=self.config.model.path,
megatron_config=megatron_config,
override_model_config=override_model_config,
)
# FIXME(sgm): reward model param offload is implemented in MegatronRewardModel
# should be implemented in workers
self.rm = MegatronRewardModel(config=self.config,
reward_model_module=reward_model_module,
model_config=reward_model_config,
megatron_config=megatron_config,
sft_tokenizer=sft_tokenizer,
rm_tokenizer=rm_tokenizer)
# TODO: the reward model should use its own tokenizer instead of the sft tokenizer;
# the input_ids, responses, attention_mask and position_ids may be different!
@register(dispatch_mode=Dispatch.MEGATRON_COMPUTE_PROTO)
def compute_rm_score(self, data: DataProto):
data.batch = data.batch.cuda()
output = self.rm.compute_reward(data)
output = output.to('cpu')
return output
================================================
FILE: verl/workers/reward_manager/__init__.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .naive import NaiveRewardManager
from .prime import PrimeRewardManager
================================================
FILE: verl/workers/reward_manager/naive.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl import DataProto
from verl.utils.reward_score import _default_compute_score
import torch
class NaiveRewardManager:
"""The reward manager.
"""
def __init__(self, config, tokenizer, num_examine, compute_score=None) -> None:
self.tokenizer = tokenizer
self.num_examine = num_examine # the number of batches of decoded responses to print to the console
self.compute_score = compute_score or _default_compute_score
self.config = config
def __call__(self, data: DataProto):
"""We will expand this function gradually based on the available datasets"""
# If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
if 'rm_scores' in data.batch.keys():
return data.batch['rm_scores']
reward_tensor = torch.zeros_like(data.batch['responses'], dtype=torch.float32)
already_print_data_sources = {}
for i in range(len(data)):
data_item = data[i] # DataProtoItem
prompt_ids = data_item.batch['prompts']
prompt_length = prompt_ids.shape[-1]
valid_prompt_length = data_item.batch['attention_mask'][:prompt_length].sum()
valid_prompt_ids = prompt_ids[-valid_prompt_length:]
response_ids = data_item.batch['responses']
valid_response_length = data_item.batch['attention_mask'][prompt_length:].sum()
valid_response_ids = response_ids[:valid_response_length]
# decode
prompt_str = self.tokenizer.decode(valid_prompt_ids)
response_str = self.tokenizer.decode(valid_response_ids)
ground_truth = data_item.non_tensor_batch['reward_model']['ground_truth']
data_source = data_item.non_tensor_batch['data_source']
extra_info = data_item.non_tensor_batch.get('extra_info', None)
score = self.compute_score(
data_source=data_source,
solution_str=response_str,
ground_truth=ground_truth,
extra_info=extra_info,
reward_type=self.config.data.reward_type
)
if self.config.data.execution_error_penalty and data_item.batch['execution_passes'].item() == 0:
score -= 0.5
reward_tensor[i, valid_response_length - 1] = score
if data_source not in already_print_data_sources:
already_print_data_sources[data_source] = 0
if already_print_data_sources[data_source] < self.num_examine:
already_print_data_sources[data_source] += 1
# print("[prompt]", prompt_str)
# print("[response]", response_str)
# print("[ground_truth]", ground_truth)
# print("[score]", score)
return reward_tensor
================================================
FILE: verl/workers/reward_manager/prime.py
================================================
# Copyright 2024 PRIME team and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import torch
from verl import DataProto
from verl.utils.reward_score import _default_compute_score
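# Run a single scoring call in a worker process with a hard timeout; timeouts and errors yield None, which is mapped to a 0.0 score downstream.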
async def single_compute_score(evaluation_func, completion, reference, task, task_extra_info, executor, timeout=300.):
loop = asyncio.get_running_loop()
try:
# Ensure process_completion is called properly
tasks = [
asyncio.wait_for(
loop.run_in_executor(
executor,
partial(evaluation_func, task, completion, reference, task_extra_info) # Ensure synchronous
),
timeout=timeout)
]
return await asyncio.gather(*tasks)
except asyncio.TimeoutError:
print(f"Timeout occurred for completion: {completion}")
return None # Default value for timed-out rows
except Exception as e:
print(f"Error processing completion: {completion[:10]}, Error: {e}")
return None # Default value for failed rows
async def parallel_compute_score_async(evaluation_func,
completions,
references,
tasks,
extra_info=None,
num_processes=64):
scores = []
with ProcessPoolExecutor(max_workers=num_processes) as executor:
if extra_info is None:
extra_info = [None] * len(tasks)
# Create tasks for all rows
tasks_async = [
single_compute_score(evaluation_func, completion, reference, task, task_extra_info, executor, timeout=300.)
for completion, reference, task, task_extra_info in zip(completions, references, tasks, extra_info)
]
# To prevent very occasional starvation caused by anomalous programs (e.g. an infinite loop),
# any exception in the async evaluation instantly halts it, and all spawned processes are killed.
try:
results = await asyncio.gather(*tasks_async, return_exceptions=False)
except BaseException:
for pid, proc in executor._processes.items():
try:
proc.kill()
except Exception as kill_err:
print('shut down failed: ' + str(kill_err))
raise
# Process results
for result, completion, reference, task in zip(results, completions, references, tasks):
if isinstance(result, Exception) or result is None:
# Handle failed or timed-out tasks
scores.append(0.0)
elif isinstance(result[0], (int, float, bool)):
scores.append(float(result[0]))
else:
scores.append(float(result[0][0]))
return scores
class PrimeRewardManager:
"""
The Reward Manager used in https://github.com/PRIME-RL/PRIME
"""
def __init__(self, tokenizer, num_examine, compute_score=None) -> None:
self.tokenizer = tokenizer
self.num_examine = num_examine # the number of batches of decoded responses to print to the console
self.compute_score = compute_score or _default_compute_score
def __call__(self, data: DataProto):
"""We will expand this function gradually based on the available datasets"""
# If there is rm score, we directly return rm score. Otherwise, we compute via rm_score_fn
if 'rm_scores' in data.batch.keys():
return data.batch['rm_scores']
reward_tensor = torch.zeros_like(data.batch['responses'], dtype=torch.float32)
already_print_data_sources = {}
# batched scoring
prompt_ids = data.batch['prompts']
prompt_length = prompt_ids.shape[-1]
response_ids = data.batch['responses']
valid_response_length = data.batch['attention_mask'][:, prompt_length:].sum(dim=-1)
response_str = self.tokenizer.batch_decode(response_ids, skip_special_tokens=True)
ground_truth = [data_item.non_tensor_batch['reward_model']['ground_truth'] for data_item in data]
data_sources = data.non_tensor_batch['data_source']
extra_info = data.non_tensor_batch.get('extra_info', [None] * len(data_sources))
assert len(response_str) == len(ground_truth) == len(data_sources) == len(extra_info)
try:
scores = asyncio.run(
parallel_compute_score_async(self.compute_score,
response_str,
ground_truth,
data_sources,
extra_info,
num_processes=64))
except asyncio.TimeoutError as e:
print('Global timeout in reward computing! Setting all as 0.')
scores = [0. for _ in range(len(response_str))]
except Exception as e:
print(f"Unexpected error in batched reward computing. Setting all as 0.: {e}")
scores = [0. for _ in range(len(response_str))]
for i in range(len(data)):
data_source = data_sources[i]
reward_tensor[i, valid_response_length[i].item() - 1] = scores[i]
if data_source not in already_print_data_sources:
already_print_data_sources[data_source] = 0
if already_print_data_sources[data_source] < self.num_examine:
already_print_data_sources[data_source] += 1
print("[response]", response_str[i])
return reward_tensor
================================================
FILE: verl/workers/reward_model/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import BasePPORewardModel
================================================
FILE: verl/workers/reward_model/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base class for reward model
"""
from abc import ABC, abstractmethod
from verl import DataProto
class BasePPORewardModel(ABC):
def __init__(self, config):
self.config = config
@abstractmethod
def compute_reward(self, data: DataProto) -> DataProto:
"""Computing reward given input_ids. The transformers should output a tensor with shape
[batch_size, sequence_length], and the value at [EOS] mask should be gathered.
Args:
data: must contain keys "input_ids", "attention_mask" and "position_ids".
- input_ids: [batch_size, sequence_length]
- attention_mask: [batch_size, sequence_length]
- position_ids: [batch_size, sequence_length]
Returns: a data pass protocol containing "reward". Only the [EOS] position contains the reward.
Other position should have zero reward. Note that this may change in the future if we use
dense reward. So, we leave the interface for general case.
- reward: [batch_size, sequence_length].
"""
pass
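# A minimal sketch of a concrete implementation (hypothetical, for illustration only;
# `ZeroRewardModel` is not part of this codebase): return zero reward everywhere,
# with the contract-mandated shape [batch_size, sequence_length].
#
# import torch
# from tensordict import TensorDict
#
# class ZeroRewardModel(BasePPORewardModel):
#     def compute_reward(self, data: DataProto) -> DataProto:
#         attention_mask = data.batch['attention_mask']  # (bs, seqlen)
#         reward = torch.zeros_like(attention_mask, dtype=torch.float32)
#         batch = TensorDict({'reward': reward}, batch_size=attention_mask.shape[0])
#         return DataProto(batch=batch)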
================================================
FILE: verl/workers/reward_model/megatron/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .reward_model import MegatronRewardModel
================================================
FILE: verl/workers/reward_model/megatron/reward_model.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Megatron Reward Model.
"""
from tensordict import TensorDict
from verl import DataProto
import torch
import torch.distributed
from verl.utils.torch_functional import pad_sequence_to_length
from verl.utils.megatron.pipeline_parallel import (compute_transformers_input_shapes, make_batch_generator)
from verl import DataProto
from verl.utils.torch_functional import broadcast_dict_tensor, split_dict_tensor_into_batches
from verl.workers.reward_model.base import BasePPORewardModel
from megatron.core import parallel_state as mpu
from megatron.core.pipeline_parallel import get_forward_backward_func
class MegatronRewardModel(BasePPORewardModel):
def __init__(self,
config,
model_config,
reward_model_module: torch.nn.ModuleList,
megatron_config,
sft_tokenizer=None,
rm_tokenizer=None):
self.config = config
self.reward_model_module = reward_model_module
self.megatron_config = megatron_config
self.model_config = model_config
self.device = 'cuda'
self.sft_tokenizer = sft_tokenizer
self.rm_tokenizer = rm_tokenizer
self.use_different_tokenizer = rm_tokenizer is not None
if self.config.param_offload:
self.offload_params_to_cpu()
def re_encode_by_rm_tokenizer(self, data: DataProto) -> DataProto:
assert self.use_different_tokenizer, 're-encoding requires rm_tokenizer to be non-None!'
# need to use rm tokenizer to re-generate input_ids, attention_mask and position_ids
# 1. remove pad for each sequence
# 2. decode by sft_tokenizer, remove sft system prompts
# 3. encode by rm_tokenizer with rm system prompts, get rm_input_ids
# 4. generate attention_mask and position_ids
input_ids = data.batch['input_ids'] # (bs, seq_len)
attention_mask = data.batch['attention_mask']
position_ids = data.batch['position_ids']
ori_values = {'input_ids': input_ids, 'attention_mask': attention_mask, 'position_ids': position_ids}
ori_bs, ori_seqlen = input_ids.size(0), input_ids.size(1)
input_ids_for_rm = []
attention_mask_for_rm = []
position_ids_for_rm = []
print_decode = True
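# reserve headroom beyond the sft sequence length for extra tokens added by the rm chat template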
ori_seqlen = ori_seqlen + 128
for id, mask in zip(input_ids, attention_mask):
# 1. remove pad for each sequence
non_zero_indices = torch.nonzero(mask).view(-1)
begin_pos, end_pos = non_zero_indices[0].item(), non_zero_indices[-1].item()
valid_id = id[begin_pos:end_pos + 1]
# 2. decode by sft_tokenizer, remove sft system prompts
decode_result = self.sft_tokenizer.decode(valid_id)
# workaround
decode_with_rm_chat = decode_result.replace("<|user|>\n", "[INST] ").replace(
"\n<|assistant|>\n", " [/INST]").replace(" \n<|assistant|>\n", " [/INST]")
if print_decode and torch.distributed.get_rank() == 0:
# only print first decode result
print(f'device {torch.cuda.current_device()}: sft decode result:\n{decode_result}\n \
\ndevice {torch.cuda.current_device()}: sft decode result with rm chat template:\n{decode_with_rm_chat}\n\n'
)
print_decode = False
# 3. encode by rm_tokenizer
rm_input_ids = self.rm_tokenizer(decode_with_rm_chat,
return_tensors='pt')['input_ids'][0].to(input_ids.device)
# 4. generate attention_mask and position_ids
rm_attention_mask = torch.ones_like(rm_input_ids, device=input_ids.device)
cur_seqlen = rm_input_ids.shape[-1]
# NOTE(gh): the later reward compute will process the shape (bs, seqlen_pad_128)
if cur_seqlen > ori_seqlen:
print(f'warning: rm encode seqlen {cur_seqlen} > sft encode seqlen {ori_seqlen}')
rm_input_ids = rm_input_ids[:ori_seqlen]
rm_attention_mask = rm_attention_mask[:ori_seqlen]
else:
# right padding
rm_input_ids = pad_sequence_to_length(rm_input_ids, ori_seqlen, self.rm_tokenizer.pad_token_id)
rm_attention_mask = pad_sequence_to_length(rm_attention_mask, ori_seqlen, 0)
rm_position_ids = torch.arange(0, ori_seqlen, device=input_ids.device)
input_ids_for_rm.append(torch.unsqueeze(rm_input_ids, dim=0))
attention_mask_for_rm.append(torch.unsqueeze(rm_attention_mask, dim=0))
position_ids_for_rm.append(torch.unsqueeze(rm_position_ids, dim=0))
input_ids_for_rm = torch.cat(input_ids_for_rm, dim=0)
attention_mask_for_rm = torch.cat(attention_mask_for_rm, dim=0)
position_ids_for_rm = torch.cat(position_ids_for_rm, dim=0)
# (bs, seqlen) will not change, but input_ids, attention_mask and position_ids will change
# NOTE(gh): the original values must be restored after computing the reward!
data.batch['input_ids'] = input_ids_for_rm
data.batch['attention_mask'] = attention_mask_for_rm
data.batch['position_ids'] = position_ids_for_rm
return data, ori_values
@torch.no_grad()
def compute_reward(self, data: DataProto) -> DataProto:
if self.config.param_offload:
self.load_params_to_cuda()
if self.use_different_tokenizer:
data, ori_values = self.re_encode_by_rm_tokenizer(data)
input_ids = data.batch['input_ids'] # (bs, seq_len')
attention_mask = data.batch['attention_mask']
position_ids = data.batch['position_ids']
responses = data.batch['responses']
batch_size = responses.size(0)
response_length = responses.size(1)
with torch.no_grad():
output = self.forward_batch(data)
if mpu.is_pipeline_last_stage(ignore_virtual=True):
logits = torch.cat([o['logits'] for o in output], dim=0)
else:
logits = torch.empty(
(input_ids.shape[0], input_ids.shape[1]),
dtype=torch.bfloat16, # TODO(sgm): check why this is bfloat16
device=input_ids.device)
# broadcast across pp ranks
torch.distributed.broadcast(tensor=logits,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group(),
async_op=False)
# (bs, seqlen', hidden_size) -> (bs, seqlen', 1) -> (bs, seqlen')
token_level_rewards = logits
# find the last token reward
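# cumsum of the mask is maximal at the last attended token, so argmax of the cumsum locates it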
ends = attention_mask.cumsum(dim=-1).argmax(dim=-1).view(-1, 1) # (bs, 1)
rewards = torch.gather(token_level_rewards, dim=1, index=ends) # (bs, 1)
if self.use_different_tokenizer:
data.batch.update(ori_values)
input_ids = ori_values['input_ids']
attention_mask = ori_values['attention_mask']
position_ids = ori_values['position_ids']
token_level_rewards = rewards.expand(attention_mask.shape[0], attention_mask.shape[1]) # (bs, ori_seqlen)
# assign last valid token reward to ori position
eos_mask_idx = torch.argmax(position_ids * attention_mask, dim=-1) # (bs,)
eos_mask = torch.zeros_like(attention_mask)
eos_mask[torch.arange(batch_size), eos_mask_idx] = 1.
token_level_rewards = token_level_rewards * eos_mask
token_level_rewards = token_level_rewards[:, -response_length:]
if self.config.param_offload:
self.offload_params_to_cpu()
else:
# add empty cache after each compute
torch.cuda.empty_cache()
batch = TensorDict({'rm_scores': token_level_rewards}, batch_size=input_ids.shape[0])
return DataProto(batch=batch)
def forward_batch(self, data: DataProto):
"""
We assume:
- The model takes input: (input_ids, attention_mask, position_ids). No rmpad for the input
- The communication shape is (total_nnz_pad_to_sp // tp_size, 1, hidden_size) if sequence parallel is enabled
"""
# broadcast from last pp rank to all other pp ranks
# TODO: actually, we just need to control the sampling order.
data.batch = data.batch.contiguous()
broadcast_dict_tensor(data.batch,
src=mpu.get_pipeline_model_parallel_last_rank(),
group=mpu.get_pipeline_model_parallel_group())
# split into micro-batches
if self.config is not None and 'ppo_micro_batch_size_per_gpu' in self.config:
infer_batch_size = self.config.ppo_micro_batch_size_per_gpu
else:
infer_batch_size = data.batch.batch_size[0]
data.batch['attention_mask'] = data.batch['attention_mask'].to(bool)
batches = split_dict_tensor_into_batches(data.batch, batch_size=infer_batch_size)
n_micro_batch = len(batches)
seq_len = batches[0]['input_ids'].shape[1]
# compute input shapes for pp stages
input_shapes = compute_transformers_input_shapes(
batches,
meta_info={
'sequence_parallel': self.megatron_config.sequence_parallel,
'hidden_size': self.model_config.hidden_size
})
forward_backward_func = get_forward_backward_func()
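# forward-only inference: loss_func returns a dummy loss and stashes the logits for collection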
def loss_func(output):
return 1., {'logits': output.logits}
def forward_step(batch_iter, model):
batch = next(batch_iter)
input_ids = batch['input_ids']
attention_mask = batch['attention_mask']
position_ids = batch['position_ids']
output = model(input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids)
return output, loss_func
# batch should be a list of batches inside micro-batches
batch_generator = make_batch_generator(batches, vpp_size=len(self.reward_model_module))
# TODO: we may use the new schedule instead
# for flash-attn: (seq_len, batch_size, hidden_size) = (mbs*seq_len, 1, hidden_size)
if mpu.get_pipeline_model_parallel_world_size() > 1:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.reward_model_module,
num_microbatches=n_micro_batch,
seq_length=infer_batch_size * seq_len, # unused when input_shapes is set
micro_batch_size=1, # unused when input_shapes is set
forward_only=True,
)
else:
losses_reduced = forward_backward_func(
forward_step_func=forward_step,
data_iterator=batch_generator,
model=self.reward_model_module,
num_microbatches=n_micro_batch,
seq_length=infer_batch_size * seq_len, # in use for pp = 1
micro_batch_size=1, # in use for pp = 1
forward_only=True,
)
# losses_reduced contains the stats returned from loss_func
return losses_reduced
def offload_params_to_cpu(self):
if self.device == 'cuda':
for reward_model_module in self.reward_model_module:
for name, param in reward_model_module.named_parameters():
param.data = param.data.to('cpu', non_blocking=True)
self.device = 'cpu'
torch.cuda.empty_cache()
def load_params_to_cuda(self):
if self.device == 'cpu':
for reward_model_module in self.reward_model_module:
for name, param in reward_model_module.named_parameters():
param.data = param.data.to(torch.cuda.current_device(), non_blocking=True)
self.device = 'cuda'
================================================
FILE: verl/workers/rollout/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import BaseRollout
from .naive import NaiveRollout
from .hf_rollout import HFRollout
__all__ = ["BaseRollout", "NaiveRollout", "HFRollout"]
================================================
FILE: verl/workers/rollout/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Iterable, Union
from verl import DataProto
__all__ = ['BaseRollout']
class BaseRollout(ABC):
def __init__(self):
"""
Args:
dataloader: an Iterable of TensorDict that consistently generates prompts. Note that the dataloader
is responsible for deciding when training stops.
"""
super().__init__()
@abstractmethod
def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Generate sequences"""
pass
================================================
FILE: verl/workers/rollout/hf_rollout.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Rollout with huggingface models.
TODO: refactor this class. Currently, it will hang when using FSDP HybridShard. We should actually create a single GPU model.
Then, get full state_dict and bind the state_dict to the single GPU model. Then, use the single GPU model to perform generation.
"""
import contextlib
import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from verl import DataProto
from verl.utils.torch_functional import get_eos_mask
from .base import BaseRollout
from transformers import GenerationConfig
__all__ = ['HFRollout']
class HFRollout(BaseRollout):
def __init__(self, module: nn.Module, config):
super().__init__()
self.config = config
self.module = module
def generate_sequences(self, prompts: DataProto) -> DataProto:
batch_size = prompts.batch.batch_size[0]
num_chunks = max(batch_size // self.config.get('micro_batch_size', batch_size), 1)
batch_prompts = prompts.chunk(chunks=num_chunks)
output = [self._generate_minibatch(p) for p in batch_prompts]
output = DataProto.concat(output)
return output
@torch.no_grad()
def _generate_minibatch(self, prompts: DataProto) -> DataProto:
idx = prompts.batch['input_ids'] # (bs, prompt_length)
attention_mask = prompts.batch['attention_mask'] # left-padded attention_mask
position_ids = prompts.batch['position_ids']
# used to construct attention_mask
eos_token_id = prompts.meta_info['eos_token_id']
pad_token_id = prompts.meta_info['pad_token_id']
batch_size = idx.size(0)
prompt_length = idx.size(1)
self.module.eval()
param_ctx = contextlib.nullcontext()
# allow sampling args to be overridden by the inputs
do_sample = prompts.meta_info.get('do_sample', self.config.do_sample)
response_length = prompts.meta_info.get('response_length', self.config.response_length)
top_p = prompts.meta_info.get('top_p', self.config.get('top_p', 1.0))
top_k = prompts.meta_info.get('top_k', self.config.get('top_k', 0))
if top_k is None:
top_k = 0
top_k = max(0, top_k) # to be compatible with vllm
temperature = prompts.meta_info.get('temperature', self.config.temperature)
generation_config = GenerationConfig(temperature=temperature, top_p=top_p, top_k=top_k)
if isinstance(self.module, FSDP):
# recurse need to set to False according to https://github.com/pytorch/pytorch/issues/100069
param_ctx = FSDP.summon_full_params(self.module, writeback=False, recurse=False)
with param_ctx:
with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
output = self.module.generate(
input_ids=idx,
attention_mask=attention_mask,
do_sample=do_sample,
max_new_tokens=response_length,
# max_length=max_length,
eos_token_id=eos_token_id,
pad_token_id=pad_token_id,
generation_config=generation_config,
# renormalize_logits=True,
output_scores=False, # this is potentially very large
return_dict_in_generate=True,
use_cache=True)
# TODO: filter out the seq with no answers like ds-chat
seq = output.sequences
# huggingface generate stops as soon as every sequence in the batch reaches [EOS].
# We have to pad the output back to response_length
sequence_length = prompt_length + self.config.response_length
delta_length = sequence_length - seq.shape[1]
if delta_length > 0:
delta_tokens = torch.ones(size=(batch_size, delta_length), device=seq.device, dtype=seq.dtype)
delta_tokens = pad_token_id * delta_tokens
seq = torch.cat((seq, delta_tokens), dim=1)
assert seq.shape[1] == sequence_length
prompt = seq[:, :prompt_length] # (bs, prompt_length)
response = seq[:, prompt_length:] # (bs, response_length)
response_length = response.size(1)
delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
delta_position_id = delta_position_id.unsqueeze(0).repeat(batch_size, 1)
response_position_ids = position_ids[:, -1:] + delta_position_id
position_ids = torch.cat([position_ids, response_position_ids], dim=-1)
response_attention_mask = get_eos_mask(response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype)
attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)
batch = TensorDict(
{
'prompts': prompt,
'responses': response,
'input_ids': seq,
'attention_mask': attention_mask,
'position_ids': position_ids
},
batch_size=batch_size)
# empty cache before compute old_log_prob
torch.cuda.empty_cache()
self.module.train()
return DataProto(batch=batch)
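# A minimal usage sketch for HFRollout. The checkpoint name and the config
# values below are illustrative assumptions, not defaults of this class; any
# HF causal LM with left-padded prompts works the same way.
if __name__ == '__main__':
    from omegaconf import OmegaConf
    from transformers import AutoModelForCausalLM, AutoTokenizer

    name = 'Qwen/Qwen2.5-0.5B'  # assumed checkpoint, for illustration only
    tokenizer = AutoTokenizer.from_pretrained(name, padding_side='left')
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(name)

    config = OmegaConf.create({
        'micro_batch_size': 2,
        'do_sample': True,
        'response_length': 32,
        'temperature': 1.0,
    })
    rollout = HFRollout(module=model, config=config)

    enc = tokenizer(['1 + 1 =', 'The capital of France is'], return_tensors='pt', padding=True)
    attention_mask = enc['attention_mask']
    # left-pad-aware position ids: padded positions are clamped to 0
    position_ids = torch.clip(attention_mask.cumsum(dim=-1) - 1, min=0)
    prompts = DataProto(batch=TensorDict(
        {
            'input_ids': enc['input_ids'],
            'attention_mask': attention_mask,
            'position_ids': position_ids
        }, batch_size=2),
                        meta_info={
                            'eos_token_id': tokenizer.eos_token_id,
                            'pad_token_id': tokenizer.pad_token_id
                        })
    out = rollout.generate_sequences(prompts)
    print(out.batch['responses'].shape)  # (2, response_length)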
================================================
FILE: verl/workers/rollout/naive/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .naive_rollout import NaiveRollout
================================================
FILE: verl/workers/rollout/naive/naive_rollout.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In single GPU rollout, the sequences are generated directly by sampling from the model.
The output will contain
1. output_ids
2. attention_masks (left padding)
3. eos_masks
4. log_probs
"""
from typing import Iterable, Union
import torch
import torch.nn.functional as F
from tensordict import TensorDict
from torch import nn
from verl import DataProto
from verl.utils.torch_functional import logprobs_from_logits
from ..base import BaseRollout
__all__ = ['NaiveRollout']
class NaiveRollout(BaseRollout):
def __init__(self, module: nn.Module, config):
"""A naive rollout. It requires the module to be compatible with huggingface APIs. That is:
The module should define __call__ to receive input_ids, attention_mask and position_ids.
It outputs a structure that contains logits field.
Args:
module: module here follows huggingface APIs
config: DictConfig
"""
super().__init__()
self.config = config
self.module = module
@torch.no_grad()
def generate_sequences(self, prompts: DataProto) -> DataProto:
"""Generate sequences"""
idx = prompts.batch['input_ids'] # (bs, prompt_length)
attention_mask = prompts.batch['attention_mask'] # left-padded attention_mask
position_ids = prompts.batch['position_ids']
# used to construct attention_mask
eos_token_id = prompts.meta_info['eos_token_id']
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
batch_size = idx.size(0)
prompt_length = idx.size(1)
self.module.eval()
prev_attention_mask = torch.ones(size=(batch_size, 1), dtype=attention_mask.dtype, device=attention_mask.device)
logits_lst = []
for _ in range(self.config.response_length):
# if the sequence context is growing too long we must crop it at block_size
# idx_cond = idx if idx.size(1) <= self.config.block_size else idx[:, -self.config.block_size:]
idx_cond = idx
# forward the model to get the logits for the index in the sequence
# we use huggingface APIs here
output = self.module(input_ids=idx_cond, attention_mask=attention_mask, position_ids=position_ids)
logits = output.logits
# pluck the logits at the final step and scale by desired temperature
logits = logits[:, -1, :] / self.config.temperature # (bs, vocab_size)
# optionally crop the logits to only the top k options
if self.config.top_k is not None:
v, _ = torch.topk(logits, min(self.config.top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float('Inf')
# apply softmax to convert logits to (normalized) probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution
if self.config.do_sample:
idx_next = torch.multinomial(probs, num_samples=1)
else:
idx_next = torch.argmax(probs, dim=-1, keepdim=True)
attention_mask = torch.cat((attention_mask, prev_attention_mask), dim=-1)
for token_id in eos_token_id:
prev_attention_mask = torch.logical_and(idx_next != token_id, prev_attention_mask.bool())
prev_attention_mask = prev_attention_mask.to(attention_mask.dtype)
position_ids = torch.cat((position_ids, position_ids[:, -1:] + 1), dim=-1)
# append sampled index to the running sequence and continue
idx = torch.cat((idx, idx_next), dim=1)
logits_lst.append(logits)
logits = torch.stack(logits_lst, dim=1) # (bs, response_length, vocab_size)
prompts = idx[:, :prompt_length] # (bs, prompt_length)
response = idx[:, prompt_length:] # (bs, response_length)
log_probs = logprobs_from_logits(logits=logits, labels=response)
batch = TensorDict(
{
'input_ids': prompts,
'responses': response,
'sequences': idx,
'old_log_probs': log_probs,
'attention_mask': attention_mask,
'position_ids': position_ids,
},
batch_size=batch_size)
self.module.train()
return DataProto(batch=batch)
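# The per-step decoding rule used above (temperature scaling, optional top-k
# truncation, then multinomial sampling), as a self-contained sketch with
# made-up logits:
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F

    torch.manual_seed(0)
    step_logits = torch.randn(2, 8)  # (bs, vocab_size)
    temperature, top_k = 1.0, 3

    step_logits = step_logits / temperature
    v, _ = torch.topk(step_logits, min(top_k, step_logits.size(-1)))
    step_logits[step_logits < v[:, [-1]]] = -float('Inf')  # keep only the top-k logits per row
    probs = F.softmax(step_logits, dim=-1)
    idx_next = torch.multinomial(probs, num_samples=1)  # (bs, 1)
    print(idx_next)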
================================================
FILE: verl/workers/rollout/tokenizer.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The base tokenizer class, required for any hybrid engine based rollout or inference with vLLM.
"""
from abc import ABC, abstractmethod
from typing import Dict, List, Union
__all__ = ['HybridEngineBaseTokenizer']
class HybridEngineBaseTokenizer(ABC):
"""the tokenizer property and function name should align with HF's to meet vllm requirement"""
@property
@abstractmethod
def vocab_size(self):
"""
`int`: Size of the base vocabulary (without the added tokens).
"""
pass
@property
@abstractmethod
def pad_token_id(self):
"""
`Optional[int]`: Id of the padding token in the vocabulary. Returns `None` if the token has not been set.
"""
pass
@property
@abstractmethod
def eos_token_id(self):
"""
`Optional[int]`: Id of the end of sentence token in the vocabulary. Returns `None` if the token has not been
set.
"""
pass
@property
@abstractmethod
def all_special_ids(self) -> List[int]:
"""
`List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
"""
pass
@property
@abstractmethod
def all_special_tokens(self) -> List[str]:
"""
`List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.).
Convert tokens of `tokenizers.AddedToken` type to string.
"""
pass
@abstractmethod
def encode(self, text):
"""
Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.
Args:
text (`str`, `List[str]` or `List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
`tokenize` method) or a list of integers.
text_pair (`str`, `List[str]` or `List[int]`, *optional*):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers.
"""
pass
@abstractmethod
def decode(
self,
token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: bool = None,
**kwargs,
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
pass
@abstractmethod
def convert_ids_to_tokens(self,
ids: Union[int, List[int]],
skip_special_tokens: bool = False) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices in a token or a sequence of tokens, using the vocabulary and
added tokens.
Args:
ids (`int` or `List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
Returns:
`str` or `List[str]`: The decoded token(s).
"""
pass
@abstractmethod
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index. Results might be different from
the fast call because for now we always add the tokens even if they are already in the vocabulary. This is
something we should change.
Returns:
`Dict[str, int]`: The added tokens.
"""
pass
@abstractmethod
def convert_tokens_to_string(self, tokens: List[str]) -> str:
"""
Converts a sequence of tokens into a single string. The simplest way to do it is `" ".join(tokens)` but we
often want to remove sub-word tokenization artifacts at the same time.
Args:
tokens (`List[str]`): The tokens to join into a string.
Returns:
`str`: The joined tokens.
"""
pass
@property
def is_fast(self):
return False
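# A minimal concrete subclass sketch: delegate every member to an existing
# huggingface tokenizer (an assumption for illustration; any object exposing
# the same attributes works), which is usually all vLLM needs.
class _HFBackedTokenizer(HybridEngineBaseTokenizer):

    def __init__(self, hf_tokenizer):
        self._tk = hf_tokenizer

    @property
    def vocab_size(self):
        return self._tk.vocab_size

    @property
    def pad_token_id(self):
        return self._tk.pad_token_id

    @property
    def eos_token_id(self):
        return self._tk.eos_token_id

    @property
    def all_special_ids(self):
        return self._tk.all_special_ids

    @property
    def all_special_tokens(self):
        return self._tk.all_special_tokens

    def encode(self, text):
        return self._tk.encode(text)

    def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, **kwargs):
        return self._tk.decode(token_ids,
                               skip_special_tokens=skip_special_tokens,
                               clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                               **kwargs)

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        return self._tk.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)

    def get_added_vocab(self):
        return self._tk.get_added_vocab()

    def convert_tokens_to_string(self, tokens):
        return self._tk.convert_tokens_to_string(tokens)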
================================================
FILE: verl/workers/rollout/vllm_rollout/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib.metadata import version, PackageNotFoundError
def get_version(pkg):
try:
return version(pkg)
except PackageNotFoundError:
return None
package_name = 'vllm'
package_version = get_version(package_name)
if package_version <= '0.6.3':
vllm_mode = 'customized'
from .vllm_rollout import vLLMRollout
from .fire_vllm_rollout import FIREvLLMRollout
else:
vllm_mode = 'spmd'
from .vllm_rollout_spmd import vLLMRollout
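# Note: the comparison above is lexicographic on version strings, which orders
# the versions this package targets (0.3.1 ... 0.6.3) correctly but would
# misorder e.g. '0.10.0' (treated as <= '0.6.3') and raises TypeError when
# vllm is not installed (package_version is None). A more robust gate would
# look like the following sketch (requires the `packaging` package, which is
# not imported by this file):
#
#   from packaging.version import Version
#   if package_version is not None and Version(package_version) <= Version('0.6.3'):
#       vllm_mode = 'customized'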
================================================
FILE: verl/workers/rollout/vllm_rollout/fire_vllm_rollout.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vllm_rollout that can be applied in different backends
When working with FSDP:
- Use DTensor weight loader (recommended) or HF weight loader
- Utilize state_dict from the FSDP to synchronize the weights among tp ranks in vLLM
When working with Megatron:
- Use Megatron weight loader
- During training, only the current pp stage holds the parameters
- Before inference, broadcast the parameters of the current pp rank to all other pp ranks (so all pp ranks hold all the parameters)
- Bind the parameters to the inference engine
- Do inference in tp. pp is treated as additional dp
- After inference, all the parameters that don't belong to this pp rank are freed.
"""
from typing import List
from contextlib import contextmanager
from omegaconf import DictConfig
import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from verl import DataProto
from verl.utils.torch_functional import get_eos_mask, pad_sequence_to_length
from verl.workers.rollout.base import BaseRollout
from verl.workers.rollout.vllm_rollout.vllm_rollout import vLLMRollout
from verl.third_party.vllm import LLM, vllm_version
from verl.third_party.vllm import parallel_state as vllm_ps
from vllm import SamplingParams
# TODO
# 1. support pp in vllm
# 2. passing tokenizer is not necessary? no encoding/decoding is happening here
# 3. simplify init logic
# NOTE(sgm): added for verl. We can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor) -> List[int]:
# remove the left padding in the prompt token_id
# pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
token_ids = prompt_token_ids[non_pad_index:].tolist()
return token_ids
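# For example (assumed pad_token_id=0):
#
#   >>> _pre_process_inputs(0, torch.tensor([0, 0, 5, 6, 7]))
#   [5, 6, 7]
#
# i.e. everything before the first non-pad token is dropped and the remainder
# is returned as a plain python list for vLLM.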
class FIREvLLMRollout(vLLMRollout):
def __init__(self, actor_module: nn.Module, config: DictConfig, tokenizer, model_hf_config, **kwargs):
"""A vLLM rollout. It requires the module is supported by the vllm.
Args:
module: module here follows huggingface APIs
config: DictConfig
tokenizer: the task/model tokenizer
model_hf_config: the huggingface config used to initialize the generating model in vLLM
**kwargs: train_tp, for Megatron Backend to initialize hybrid engine (zero redundancy) process group
"""
super().__init__(actor_module, config, tokenizer, model_hf_config, **kwargs)
self.use_fire_sampling = config.get('use_fire_sampling', False)
if self.use_fire_sampling:
kwargs_0 = kwargs.copy()
kwargs_0['temperature'] = 30
kwargs_0['max_tokens'] = 1
if 'top_k' not in kwargs_0 or kwargs_0['top_k'] <= 0:
kwargs_0['top_k'] = 16
kwargs['max_tokens'] -= 1
self.sampling_params_0 = SamplingParams(**kwargs_0)
@contextmanager
def update_sampling_params(self, **kwargs):
# update sampling params
old_sampling_params_args = {}
if kwargs:
for key, value in kwargs.items():
if hasattr(self.sampling_params, key):
old_value = getattr(self.sampling_params, key)
old_sampling_params_args[key] = old_value
setattr(self.sampling_params, key, value)
if self.use_fire_sampling:
old_sampling_params_args_0 = {}
if kwargs:
for key, value in kwargs.items():
if hasattr(self.sampling_params_0, key):
old_value = getattr(self.sampling_params_0, key)
old_sampling_params_args_0[key] = old_value
setattr(self.sampling_params_0, key, value)
yield
# roll back to previous sampling params
# if len(old_sampling_params_args):
for key, value in old_sampling_params_args.items():
setattr(self.sampling_params, key, value)
if self.use_fire_sampling:
for key, value in old_sampling_params_args_0.items():
setattr(self.sampling_params_0, key, value)
@torch.no_grad()
def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto:
# rebuild vllm cache engine
if self.config.free_cache_engine:
self.inference_engine.init_cache_engine()
idx = prompts.batch['input_ids'] # (bs, prompt_length)
# left-padded attention_mask
attention_mask = prompts.batch['attention_mask']
position_ids = prompts.batch['position_ids']
# used to construct attention_mask
eos_token_id = prompts.meta_info['eos_token_id']
batch_size = idx.size(0)
idx_list = []
# parse idx from torch.Tensor to List[List[int]]
for i in range(batch_size):
idx_list.append(_pre_process_inputs(self.pad_token_id, idx[i]))
do_sample = prompts.meta_info.get('do_sample', True)
if not do_sample:
kwargs = {
'best_of': 1,
'top_p': 1.0,
'top_k': -1,
'min_p': 0.0,
'temperature': 0,
'n': 1 # if greedy, only 1 response
}
if not self.use_fire_sampling:
# users can customize different sampling_params at different run
with self.update_sampling_params(**kwargs):
output = self.inference_engine.generate(
prompts=None, # because we have already converted the prompts to token ids
sampling_params=self.sampling_params,
prompt_token_ids=idx_list,
use_tqdm=False)
response = output[0].to(idx.device) # (bs, response_length)
log_probs = output[1].to(idx.device) # (bs, response_length)
else:
with self.update_sampling_params(**kwargs):
output_0 = self.inference_engine.generate(
prompts=None, # because we have already converted the prompts to token ids
sampling_params=self.sampling_params_0,
prompt_token_ids=idx_list,
use_tqdm=False)
new_idx_list = []
for i in range(batch_size):
new_idx_list.append(idx_list[i] + output_0[0][i].tolist())
output = self.inference_engine.generate(
prompts=None, # because we have already converted the prompts to token ids
sampling_params=self.sampling_params,
prompt_token_ids=new_idx_list,
use_tqdm=False)
response = torch.cat([output_0[0], output[0]], dim=1).to(idx.device) # (bs, response_length)
log_probs = torch.cat([output_0[1], output[1]], dim=1).to(idx.device) # (bs, response_length)
if response.shape[1] < self.config.response_length:
response = pad_sequence_to_length(response, self.config.response_length, self.pad_token_id)
log_probs = pad_sequence_to_length(log_probs, self.config.response_length, self.pad_token_id)
if self.config.n > 1 and do_sample:
idx = idx.repeat_interleave(self.config.n, dim=0)
attention_mask = attention_mask.repeat_interleave(self.config.n, dim=0)
position_ids = position_ids.repeat_interleave(self.config.n, dim=0)
batch_size = batch_size * self.config.n
seq = torch.cat([idx, response], dim=-1)
response_length = response.size(1)
delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
delta_position_id = delta_position_id.unsqueeze(0).repeat(batch_size, 1)
# TODO(sgm): fix position_ids on right_pad
# prompt: left pad + response: right pad
# attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0]
# position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11]
response_position_ids = position_ids[:, -1:] + delta_position_id
position_ids = torch.cat([position_ids, response_position_ids], dim=-1)
response_attention_mask = get_eos_mask(response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype)
attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)
# all the tp ranks should contain the same data here. data in all ranks are valid
batch = TensorDict(
{
'prompts': idx,
'responses': response,
'input_ids': seq, # here input_ids becomes the full sequence (prompt + response)
# 'old_log_probs': log_probs, # we will recompute old log prob with actor
'attention_mask': attention_mask,
'position_ids': position_ids
},
batch_size=batch_size)
# free vllm cache engine
if self.config.free_cache_engine:
self.inference_engine.free_cache_engine()
return DataProto(batch=batch)
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/code/code_interpreter.py
================================================
import base64
import io
import json
import logging
import os
import queue
import re
import subprocess
import sys
import time
import traceback
import uuid
import matplotlib
import PIL.Image
from jupyter_client import BlockingKernelClient
from utils.code_utils import extract_code
WORK_DIR = os.getenv('CODE_INTERPRETER_WORK_DIR', '/tmp/workspace')
LAUNCH_KERNEL_PY = """
from ipykernel import kernelapp as app
app.launch_new_instance()
"""
_KERNEL_CLIENTS = {}
# Run this fix before jupyter starts if matplotlib cannot render CJK fonts.
# We also need to run the following lines inside the jupyter notebook:
# ```python
# import matplotlib.pyplot as plt
# plt.rcParams['font.sans-serif'] = ['SimHei']
# plt.rcParams['axes.unicode_minus'] = False
# ```
def fix_matplotlib_cjk_font_issue():
local_ttf = os.path.join(os.path.abspath(os.path.join(matplotlib.matplotlib_fname(), os.path.pardir)), 'fonts',
'ttf', 'simhei.ttf')
if not os.path.exists(local_ttf):
logging.warning(
f'Missing font file `{local_ttf}` for matplotlib. It may cause errors when using matplotlib.')
def start_kernel(pid):
fix_matplotlib_cjk_font_issue()
connection_file = os.path.join(WORK_DIR, f'kernel_connection_file_{pid}.json')
launch_kernel_script = os.path.join(WORK_DIR, f'launch_kernel_{pid}.py')
for f in [connection_file, launch_kernel_script]:
if os.path.exists(f):
logging.warning(f'{f} already exists')
os.remove(f)
os.makedirs(WORK_DIR, exist_ok=True)
with open(launch_kernel_script, 'w') as fout:
fout.write(LAUNCH_KERNEL_PY)
kernel_process = subprocess.Popen([
sys.executable,
launch_kernel_script,
'--IPKernelApp.connection_file',
connection_file,
'--matplotlib=inline',
'--quiet',
],
cwd=WORK_DIR)
logging.info(f"INFO: kernel process's PID = {kernel_process.pid}")
# Wait for kernel connection file to be written
while True:
if not os.path.isfile(connection_file):
time.sleep(0.1)
else:
# Keep looping if JSON parsing fails, file may be partially written
try:
with open(connection_file, 'r') as fp:
json.load(fp)
break
except json.JSONDecodeError:
pass
# Client
kc = BlockingKernelClient(connection_file=connection_file)
kc.load_connection_file()
kc.start_channels()
kc.wait_for_ready()
return kc
def escape_ansi(line):
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
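# For example (illustrative red-colored traceback fragment):
#
#   >>> escape_ansi('\x1b[0;31mTraceback\x1b[0m')
#   'Traceback'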
def publish_image_to_local(image_base64: str):
image_file = str(uuid.uuid4()) + '.png'
local_image_file = os.path.join(WORK_DIR, image_file)
png_bytes = base64.b64decode(image_base64)
assert isinstance(png_bytes, bytes)
bytes_io = io.BytesIO(png_bytes)
PIL.Image.open(bytes_io).save(local_image_file, 'png')
return local_image_file
START_CODE = """
import signal
def _m6_code_interpreter_timeout_handler(signum, frame):
raise TimeoutError("M6_CODE_INTERPRETER_TIMEOUT")
signal.signal(signal.SIGALRM, _m6_code_interpreter_timeout_handler)
def input(*args, **kwargs):
raise NotImplementedError('Python input() function is disabled.')
import os
if 'upload_file' not in os.getcwd():
os.chdir("./upload_file/")
import math
import re
import json
import seaborn as sns
sns.set_theme()
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import numpy as np
import pandas as pd
from sympy import Eq, symbols, solve
"""
def code_interpreter(action_input_list: list, timeout=30, clear=False):
code = ''
for action_input in action_input_list:
code += (extract_code(action_input) + '\n')
fixed_code = []
for line in code.split('\n'):
fixed_code.append(line)
if line.startswith('sns.set_theme('):
fixed_code.append('plt.rcParams["font.sans-serif"] = ["SimHei"]')
fixed_code.append('plt.rcParams["axes.unicode_minus"] = False')
fixed_code = '\n'.join(fixed_code)
if 'def solution()' in fixed_code:
fixed_code += '\nsolution()'
return _code_interpreter(fixed_code, timeout, clear)
def _code_interpreter(code: str, timeout, clear=False):
if not code.strip():
return ''
if timeout:
code = f'signal.alarm({timeout})\n{code}'
if clear:
code = "get_ipython().run_line_magic('reset', '-f')\n" + START_CODE + code
pid = os.getpid()
if pid not in _KERNEL_CLIENTS:
_KERNEL_CLIENTS[pid] = start_kernel(pid)
_code_interpreter(START_CODE, timeout=None)
kc = _KERNEL_CLIENTS[pid]
kc.wait_for_ready()
kc.execute(code)
result = ''
image_idx = 0
while True:
text = ''
image = ''
finished = False
msg_type = 'error'
try:
msg = kc.get_iopub_msg()
msg_type = msg['msg_type']
if msg_type == 'status':
if msg['content'].get('execution_state') == 'idle':
finished = True
elif msg_type == 'execute_result':
text = msg['content']['data'].get('text/plain', '')
if 'image/png' in msg['content']['data']:
image_b64 = msg['content']['data']['image/png']
image_url = publish_image_to_local(image_b64)
image_idx += 1
image = '![fig-%03d](%s)' % (image_idx, image_url)
elif msg_type == 'display_data':
if 'image/png' in msg['content']['data']:
image_b64 = msg['content']['data']['image/png']
image_url = publish_image_to_local(image_b64)
image_idx += 1
image = '![fig-%03d](%s)' % (image_idx, image_url)
else:
text = msg['content']['data'].get('text/plain', '')
elif msg_type == 'stream':
msg_type = msg['content']['name'] # stdout, stderr
text = msg['content']['text']
elif msg_type == 'error':
text = escape_ansi('\n'.join(msg['content']['traceback']))
if 'M6_CODE_INTERPRETER_TIMEOUT' in text:
text = f'Timeout. No response after {timeout} seconds.'
except queue.Empty:
text = f'Timeout. No response after {timeout} seconds.'
finished = True
except Exception:
text = 'The code interpreter encountered an unexpected error.'
logging.warning(''.join(traceback.format_exception(*sys.exc_info())))
finished = True
if text:
result += f'\n\n{msg_type}:\n\n```\n{text}\n```'
if image:
result += f'\n\n{image}'
if finished:
break
result = result.lstrip('\n')
if timeout:
_code_interpreter('signal.alarm(0)', timeout=None)
return result
def get_multiline_input(hint):
print(hint)
print('// Press ENTER to make a new line. Press CTRL-D to end input.')
lines = []
while True:
try:
line = input()
except EOFError: # CTRL-D
break
lines.append(line)
print('// Input received.')
if lines:
return '\n'.join(lines)
else:
return ''
if __name__ == '__main__':
while True:
print(code_interpreter([get_multiline_input('Enter python code:')]))
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/code/utils/code_utils.py
================================================
import os
import re
import json5
def replace_upload_fname(text, upload_fname_list):
for full_input_fname in upload_fname_list:
if full_input_fname not in text and os.path.basename(full_input_fname) in text:
text = text.replace(os.path.basename(full_input_fname), full_input_fname)
return text
def extract_code(text):
# Match triple backtick blocks first
triple_match = re.search(r'```[^\n]*\n(.+?)```', text, re.DOTALL)
# Match single backtick blocks second
single_match = re.search(r'`([^`]*)`', text, re.DOTALL)
if triple_match:
text = triple_match.group(1)
elif single_match:
text = single_match.group(1)
else:
try:
text = json5.loads(text)['code']
except Exception:
pass
# If no code blocks found, return original text
return text
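# Behavior sketch for extract_code on the three input shapes it handles
# (illustrative strings):
#
#   >>> extract_code('```python\nprint(1)\n```')
#   'print(1)\n'
#   >>> extract_code('run `print(2)` please')
#   'print(2)'
#   >>> extract_code('{"code": "print(3)"}')
#   'print(3)'
#   >>> extract_code('no code here')  # nothing matched: returned unchanged
#   'no code here'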
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/llm/schema.py
================================================
from typing import List, Literal, Optional, Tuple, Union
from pydantic import BaseModel, field_validator, model_validator
DEFAULT_SYSTEM_MESSAGE = 'You are a helpful assistant.'
ROLE = 'role'
CONTENT = 'content'
NAME = 'name'
SYSTEM = 'system'
USER = 'user'
ASSISTANT = 'assistant'
FUNCTION = 'function'
FILE = 'file'
IMAGE = 'image'
AUDIO = 'audio'
VIDEO = 'video'
class BaseModelCompatibleDict(BaseModel):
def __getitem__(self, item):
return getattr(self, item)
def __setitem__(self, key, value):
setattr(self, key, value)
def model_dump(self, **kwargs):
if 'exclude_none' not in kwargs:
kwargs['exclude_none'] = True
return super().model_dump(**kwargs)
def model_dump_json(self, **kwargs):
if 'exclude_none' not in kwargs:
kwargs['exclude_none'] = True
return super().model_dump_json(**kwargs)
def get(self, key, default=None):
try:
value = getattr(self, key)
if value:
return value
else:
return default
except AttributeError:
return default
def __str__(self):
return f'{self.model_dump()}'
class FunctionCall(BaseModelCompatibleDict):
name: str
arguments: str
def __init__(self, name: str, arguments: str):
super().__init__(name=name, arguments=arguments)
def __repr__(self):
return f'FunctionCall({self.model_dump()})'
class ContentItem(BaseModelCompatibleDict):
text: Optional[str] = None
image: Optional[str] = None
file: Optional[str] = None
audio: Optional[str] = None
video: Optional[Union[str, list]] = None
def __init__(self,
text: Optional[str] = None,
image: Optional[str] = None,
file: Optional[str] = None,
audio: Optional[str] = None,
video: Optional[Union[str, list]] = None):
super().__init__(text=text, image=image, file=file, audio=audio, video=video)
@model_validator(mode='after')
def check_exclusivity(self):
provided_fields = 0
if self.text is not None:
provided_fields += 1
if self.image:
provided_fields += 1
if self.file:
provided_fields += 1
if self.audio:
provided_fields += 1
if self.video:
provided_fields += 1
if provided_fields != 1:
raise ValueError("Exactly one of 'text', 'image', 'file', 'audio', or 'video' must be provided.")
return self
def __repr__(self):
return f'ContentItem({self.model_dump()})'
def get_type_and_value(self) -> Tuple[Literal['text', 'image', 'file', 'audio', 'video'], str]:
(t, v), = self.model_dump().items()
assert t in ('text', 'image', 'file', 'audio', 'video')
return t, v
@property
def type(self) -> Literal['text', 'image', 'file', 'audio', 'video']:
t, v = self.get_type_and_value()
return t
@property
def value(self) -> str:
t, v = self.get_type_and_value()
return v
class Message(BaseModelCompatibleDict):
role: str
content: Union[str, List[ContentItem]]
name: Optional[str] = None
function_call: Optional[FunctionCall] = None
extra: Optional[dict] = None
def __init__(self,
role: str,
content: Optional[Union[str, List[ContentItem]]],
name: Optional[str] = None,
function_call: Optional[FunctionCall] = None,
extra: Optional[dict] = None,
**kwargs):
if content is None:
content = ''
super().__init__(role=role, content=content, name=name, function_call=function_call, extra=extra)
def __repr__(self):
return f'Message({self.model_dump()})'
@field_validator('role')
def role_checker(cls, value: str) -> str:
if value not in [USER, ASSISTANT, SYSTEM, FUNCTION]:
raise ValueError(f'{value} must be one of {",".join([USER, ASSISTANT, SYSTEM, FUNCTION])}')
return value
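# A small usage sketch for the types above (illustrative values):
if __name__ == '__main__':
    msg = Message(role=USER, content=[ContentItem(text='hello')])
    print(msg['role'])          # dict-style access via BaseModelCompatibleDict -> 'user'
    print(msg.content[0].type)  # exactly one field is set, so .type -> 'text'
    print(msg.model_dump())     # None fields are dropped (exclude_none=True)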
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/log.py
================================================
import logging
import os
def setup_logger(level=None):
if level is None:
if os.getenv('QWEN_AGENT_DEBUG', '0').strip().lower() in ('1', 'true'):
level = logging.DEBUG
else:
level = logging.INFO
handler = logging.StreamHandler()
# Do not run handler.setLevel(level) so that users can change the level via logger.setLevel later
formatter = logging.Formatter('%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
_logger = logging.getLogger('qwen_agent_logger')
_logger.setLevel(level)
_logger.addHandler(handler)
return _logger
logger = setup_logger()
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/settings.py
================================================
import ast
import os
from typing import List, Literal
# Settings for LLMs
DEFAULT_MAX_INPUT_TOKENS: int = int(os.getenv(
'QWEN_AGENT_DEFAULT_MAX_INPUT_TOKENS', 30000)) # The LLM will truncate the input messages if they exceed this limit
# Settings for agents
MAX_LLM_CALL_PER_RUN: int = int(os.getenv('QWEN_AGENT_MAX_LLM_CALL_PER_RUN', 8))
# Settings for tools
DEFAULT_WORKSPACE: str = os.getenv('QWEN_AGENT_DEFAULT_WORKSPACE', 'workspace')
# Settings for RAG
DEFAULT_MAX_REF_TOKEN: int = int(os.getenv('QWEN_AGENT_DEFAULT_MAX_REF_TOKEN',
20000)) # The window size reserved for RAG materials
DEFAULT_PARSER_PAGE_SIZE: int = int(os.getenv('QWEN_AGENT_DEFAULT_PARSER_PAGE_SIZE',
500)) # Max tokens per chunk when doing RAG
DEFAULT_RAG_KEYGEN_STRATEGY: Literal['None', 'GenKeyword', 'SplitQueryThenGenKeyword', 'GenKeywordWithKnowledge',
'SplitQueryThenGenKeywordWithKnowledge'] = os.getenv(
'QWEN_AGENT_DEFAULT_RAG_KEYGEN_STRATEGY', 'GenKeyword')
DEFAULT_RAG_SEARCHERS: List[str] = ast.literal_eval(
os.getenv('QWEN_AGENT_DEFAULT_RAG_SEARCHERS',
"['keyword_search', 'front_page_search']")) # Sub-searchers for hybrid retrieval
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/tools/base.py
================================================
import json
import os
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Union
from qwen_agent.llm.schema import ContentItem
from qwen_agent.settings import DEFAULT_WORKSPACE
from qwen_agent.utils.utils import has_chinese_chars, json_loads, logger, print_traceback, save_url_to_local_work_dir
TOOL_REGISTRY = {}
class ToolServiceError(Exception):
def __init__(self,
exception: Optional[Exception] = None,
code: Optional[str] = None,
message: Optional[str] = None,
extra: Optional[dict] = None):
if exception is not None:
super().__init__(exception)
else:
super().__init__(f'\nError code: {code}. Error message: {message}')
self.exception = exception
self.code = code
self.message = message
self.extra = extra
def register_tool(name, allow_overwrite=False):
def decorator(cls):
if name in TOOL_REGISTRY:
if allow_overwrite:
logger.warning(f'Tool `{name}` already exists! Overwriting with class {cls}.')
else:
raise ValueError(f'Tool `{name}` already exists! Please ensure that the tool name is unique.')
if cls.name and (cls.name != name):
raise ValueError(f'{cls.__name__}.name="{cls.name}" conflicts with @register_tool(name="{name}").')
cls.name = name
TOOL_REGISTRY[name] = cls
return cls
return decorator
def is_tool_schema(obj: dict) -> bool:
"""
Check if obj is a valid JSON schema describing a tool compatible with OpenAI's tool calling.
Example valid schema:
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
"""
import jsonschema
try:
assert set(obj.keys()) == {'name', 'description', 'parameters'}
assert isinstance(obj['name'], str)
assert obj['name'].strip()
assert isinstance(obj['description'], str)
assert isinstance(obj['parameters'], dict)
assert set(obj['parameters'].keys()) == {'type', 'properties', 'required'}
assert obj['parameters']['type'] == 'object'
assert isinstance(obj['parameters']['properties'], dict)
assert isinstance(obj['parameters']['required'], list)
assert set(obj['parameters']['required']).issubset(set(obj['parameters']['properties'].keys()))
except AssertionError:
return False
try:
jsonschema.validate(instance={}, schema=obj['parameters'])
except jsonschema.exceptions.SchemaError:
return False
except jsonschema.exceptions.ValidationError:
pass
return True
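# For instance, the example from the docstring above passes the check, while a
# dict with missing keys does not (sketch):
#
#   schema = {'name': 'get_current_weather',
#             'description': 'Get the current weather in a given location',
#             'parameters': {'type': 'object',
#                            'properties': {'location': {'type': 'string'}},
#                            'required': ['location']}}
#   assert is_tool_schema(schema)
#   assert not is_tool_schema({'name': 'x'})  # missing description/parameters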
class BaseTool(ABC):
name: str = ''
description: str = ''
parameters: Union[List[dict], dict] = []
def __init__(self, cfg: Optional[dict] = None):
self.cfg = cfg or {}
if not self.name:
raise ValueError(
f'You must set {self.__class__.__name__}.name, either by @register_tool(name=...) or explicitly setting {self.__class__.__name__}.name'
)
if isinstance(self.parameters, dict):
if not is_tool_schema({'name': self.name, 'description': self.description, 'parameters': self.parameters}):
raise ValueError(
'The parameters, when provided as a dict, must conform to a valid openai-compatible JSON schema.')
@abstractmethod
def call(self, params: Union[str, dict], **kwargs) -> Union[str, list, dict, List[ContentItem]]:
"""The interface for calling tools.
Each tool needs to implement this function, which is the workflow of the tool.
Args:
params: The parameters of func_call.
kwargs: Additional parameters for calling tools.
Returns:
The result returned by the tool, implemented in the subclass.
"""
raise NotImplementedError
def _verify_json_format_args(self, params: Union[str, dict], strict_json: bool = False) -> dict:
"""Verify the parameters of the function call"""
if isinstance(params, str):
try:
if strict_json:
params_json: dict = json.loads(params)
else:
params_json: dict = json_loads(params)
except json.decoder.JSONDecodeError:
raise ValueError('Parameters must be formatted as a valid JSON!')
else:
params_json: dict = params
if isinstance(self.parameters, list):
for param in self.parameters:
if 'required' in param and param['required']:
if param['name'] not in params_json:
raise ValueError('Parameter %s is required!' % param['name'])
elif isinstance(self.parameters, dict):
import jsonschema
jsonschema.validate(instance=params_json, schema=self.parameters)
else:
raise ValueError
return params_json
@property
def function(self) -> dict: # Bad naming. It should be `function_info`.
return {
'name_for_human': self.name_for_human,
'name': self.name,
'description': self.description,
'parameters': self.parameters,
'args_format': self.args_format
}
@property
def name_for_human(self) -> str:
return self.cfg.get('name_for_human', self.name)
@property
def args_format(self) -> str:
fmt = self.cfg.get('args_format')
if fmt is None:
if has_chinese_chars([self.name_for_human, self.name, self.description, self.parameters]):
fmt = '此工具的输入应为JSON对象。'
else:
fmt = 'Format the arguments as a JSON object.'
return fmt
@property
def file_access(self) -> bool:
return False
class BaseToolWithFileAccess(BaseTool, ABC):
def __init__(self, cfg: Optional[Dict] = None):
super().__init__(cfg)
assert self.name
default_work_dir = os.path.join(DEFAULT_WORKSPACE, 'tools', self.name)
self.work_dir: str = self.cfg.get('work_dir', default_work_dir)
@property
def file_access(self) -> bool:
return True
def call(self, params: Union[str, dict], files: List[str] = None, **kwargs) -> str:
# Copy remote files to the working directory:
if files:
os.makedirs(self.work_dir, exist_ok=True)
for file in files:
try:
save_url_to_local_work_dir(file, self.work_dir)
except Exception:
print_traceback()
# Then do something with the files:
# ...
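# A minimal end-to-end sketch of the registry machinery above. The tool name
# and behavior are made up for illustration:
if __name__ == '__main__':

    @register_tool('echo')
    class EchoTool(BaseTool):
        description = 'Echo the given text back.'
        parameters = [{'name': 'text', 'type': 'string', 'description': 'text to echo', 'required': True}]

        def call(self, params: Union[str, dict], **kwargs) -> str:
            params = self._verify_json_format_args(params)
            return params['text']

    print(TOOL_REGISTRY['echo'])              # the decorator registered the class
    print(EchoTool().call('{"text": "hi"}'))  # -> hi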
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/tools/code_interpreter.py
================================================
import asyncio
import atexit
import base64
import glob
import io
import json
import os
import queue
import re
import shutil
import signal
import stat
import subprocess
import sys
import time
import uuid
import threading
from pathlib import Path
from typing import Dict, List, Optional, Union
import json5
from qwen_agent.log import logger
from qwen_agent.tools.base import BaseToolWithFileAccess, register_tool
from qwen_agent.utils.utils import append_signal_handler, extract_code, has_chinese_chars, print_traceback
LAUNCH_KERNEL_PY = """
from ipykernel import kernelapp as app
app.launch_new_instance()
"""
INIT_CODE_FILE = str(Path(__file__).absolute().parent / 'resource' / 'code_interpreter_init_kernel.py')
ALIB_FONT_FILE = str(Path(__file__).absolute().parent / 'resource' / 'AlibabaPuHuiTi-3-45-Light.ttf')
_KERNEL_CLIENTS: dict = {}
_MISC_SUBPROCESSES: Dict[str, subprocess.Popen] = {}
def _kill_kernels_and_subprocesses(_sig_num=None, _frame=None):
for v in _KERNEL_CLIENTS.values():
v.shutdown()
for k in list(_KERNEL_CLIENTS.keys()):
del _KERNEL_CLIENTS[k]
for v in _MISC_SUBPROCESSES.values():
v.terminate()
for k in list(_MISC_SUBPROCESSES.keys()):
del _MISC_SUBPROCESSES[k]
# Make sure all subprocesses are terminated even if the process is killed abnormally.
# Registering a signal handler outside the main thread (for example, when running
# in streamlit) would raise a RuntimeError, so only do so in the main thread:
if threading.current_thread() is threading.main_thread():
atexit.register(_kill_kernels_and_subprocesses)
append_signal_handler(signal.SIGTERM, _kill_kernels_and_subprocesses)
append_signal_handler(signal.SIGINT, _kill_kernels_and_subprocesses)
@register_tool('code_interpreter')
class CodeInterpreter(BaseToolWithFileAccess):
description = 'Python代码沙盒,可用于执行Python代码。'
parameters = [{'name': 'code', 'type': 'string', 'description': '待执行的代码', 'required': True}]
def __init__(self, cfg: Optional[Dict] = None):
super().__init__(cfg)
self.work_dir: str = os.getenv('M6_CODE_INTERPRETER_WORK_DIR', self.work_dir)
self.work_dir: str = self.cfg.get('work_dir', self.work_dir)
self.instance_id: str = str(uuid.uuid4())
_check_deps_for_code_interpreter()
@property
def args_format(self) -> str:
fmt = self.cfg.get('args_format')
if fmt is None:
if has_chinese_chars([self.name_for_human, self.name, self.description, self.parameters]):
fmt = '此工具的输入应为Markdown代码块。'
else:
fmt = 'Enclose the code within triple backticks (`) at the beginning and end of the code.'
return fmt
def call(self, params: Union[str, dict], files: List[str] = None, timeout: Optional[int] = 1, **kwargs) -> str:
super().call(params=params, files=files) # copy remote files to work_dir
try:
params = json5.loads(params)
code = params['code']
except Exception:
code = extract_code(params)
if not code.strip():
return ''
kernel_id: str = f'{self.instance_id}_{os.getpid()}'
if kernel_id in _KERNEL_CLIENTS:
kc = _KERNEL_CLIENTS[kernel_id]
else:
_fix_matplotlib_cjk_font_issue()
self._fix_secure_write_for_code_interpreter()
kc, subproc = self._start_kernel(kernel_id)
with open(INIT_CODE_FILE) as fin:
start_code = fin.read()
start_code = start_code.replace('{{M6_FONT_PATH}}', repr(ALIB_FONT_FILE)[1:-1])
start_code += '\n%xmode Minimal'
self._execute_code(kc, start_code)
# logger.info(self._execute_code(kc, start_code))
_KERNEL_CLIENTS[kernel_id] = kc
_MISC_SUBPROCESSES[kernel_id] = subproc
if timeout:
code = f'_M6CountdownTimer.start({timeout})\n{code}'
fixed_code = []
for line in code.split('\n'):
fixed_code.append(line)
if line.startswith('sns.set_theme('):
fixed_code.append('plt.rcParams["font.family"] = _m6_font_prop.get_name()')
fixed_code = '\n'.join(fixed_code)
fixed_code += '\n\n'  # append trailing newlines so the last statement still executes in the notebook
result = self._execute_code(kc, fixed_code)
if timeout:
self._execute_code(kc, '_M6CountdownTimer.cancel()')
return result if result.strip() else 'Finished execution.'
def __del__(self):
# Recycle the jupyter subprocess:
k: str = f'{self.instance_id}_{os.getpid()}'
if k in _KERNEL_CLIENTS:
_KERNEL_CLIENTS[k].shutdown()
del _KERNEL_CLIENTS[k]
if k in _MISC_SUBPROCESSES:
_MISC_SUBPROCESSES[k].terminate()
del _MISC_SUBPROCESSES[k]
def _fix_secure_write_for_code_interpreter(self):
if 'linux' in sys.platform.lower():
os.makedirs(self.work_dir, exist_ok=True)
fname = os.path.join(self.work_dir, f'test_file_permission_{os.getpid()}.txt')
if os.path.exists(fname):
os.remove(fname)
with os.fdopen(os.open(fname, os.O_CREAT | os.O_WRONLY | os.O_TRUNC, 0o0600), 'w') as f:
f.write('test')
file_mode = stat.S_IMODE(os.stat(fname).st_mode) & 0o6677
if file_mode != 0o0600:
os.environ['JUPYTER_ALLOW_INSECURE_WRITES'] = '1'
if os.path.exists(fname):
os.remove(fname)
def _start_kernel(self, kernel_id: str):
connection_file = os.path.join(self.work_dir, f'kernel_connection_file_{kernel_id}.json')
launch_kernel_script = os.path.join(self.work_dir, f'launch_kernel_{kernel_id}.py')
for f in [connection_file, launch_kernel_script]:
if os.path.exists(f):
logger.info(f'WARNING: {f} already exists')
os.remove(f)
os.makedirs(self.work_dir, exist_ok=True)
with open(launch_kernel_script, 'w') as fout:
fout.write(LAUNCH_KERNEL_PY)
kernel_process = subprocess.Popen(
[
sys.executable,
os.path.abspath(launch_kernel_script),
'--IPKernelApp.connection_file',
os.path.abspath(connection_file),
'--matplotlib=inline',
'--quiet',
],
cwd=os.path.abspath(self.work_dir),
)
logger.info(f"INFO: kernel process's PID = {kernel_process.pid}")
# Wait for kernel connection file to be written
while True:
if not os.path.isfile(connection_file):
time.sleep(0.1)
else:
# Keep looping if JSON parsing fails, file may be partially written
try:
with open(connection_file, 'r') as fp:
json.load(fp)
break
except json.JSONDecodeError:
pass
# Client
from jupyter_client import BlockingKernelClient
kc = BlockingKernelClient(connection_file=connection_file)
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
kc.load_connection_file()
kc.start_channels()
kc.wait_for_ready()
return kc, kernel_process
def _execute_code(self, kc, code: str) -> str:
kc.wait_for_ready()
kc.execute(code)
result = ''
image_idx = 0
while True:
text = ''
image = ''
finished = False
msg_type = 'error'
try:
msg = kc.get_iopub_msg()
msg_type = msg['msg_type']
if msg_type == 'status':
if msg['content'].get('execution_state') == 'idle':
finished = True
elif msg_type == 'execute_result':
text = msg['content']['data'].get('text/plain', '')
if 'image/png' in msg['content']['data']:
image_b64 = msg['content']['data']['image/png']
image_url = self._serve_image(image_b64)
image_idx += 1
image = '![fig-%03d](%s)' % (image_idx, image_url)
elif msg_type == 'display_data':
if 'image/png' in msg['content']['data']:
image_b64 = msg['content']['data']['image/png']
image_url = self._serve_image(image_b64)
image_idx += 1
image = '![fig-%03d](%s)' % (image_idx, image_url)
else:
text = msg['content']['data'].get('text/plain', '')
elif msg_type == 'stream':
msg_type = msg['content']['name'] # stdout, stderr
text = msg['content']['text']
elif msg_type == 'error':
text = _escape_ansi('\n'.join(msg['content']['traceback']))
if 'M6_CODE_INTERPRETER_TIMEOUT' in text:
text = 'Timeout: Code execution exceeded the time limit.'
except queue.Empty:
text = 'Timeout: Code execution exceeded the time limit.'
finished = True
except Exception:
text = 'The code interpreter encountered an unexpected error.'
print_traceback()
finished = True
if text:
result += f'\n\n{msg_type}:\n\n```\n{text}\n```'
if image:
result += f'\n\n{image}'
if finished:
break
result = result.lstrip('\n')
return result
def _serve_image(self, image_base64: str) -> str:
import PIL.Image
image_file = f'{uuid.uuid4()}.png'
local_image_file = os.path.join(self.work_dir, image_file)
png_bytes = base64.b64decode(image_base64)
assert isinstance(png_bytes, bytes)
bytes_io = io.BytesIO(png_bytes)
PIL.Image.open(bytes_io).save(local_image_file, 'png')
image_server_url = os.getenv('M6_CODE_INTERPRETER_STATIC_URL', '')
if image_server_url:
return f'{image_server_url}/{image_file}'
return local_image_file
def _check_deps_for_code_interpreter():
try:
import matplotlib # noqa
import matplotlib.pyplot as plt # noqa
import numpy as np # noqa
import pandas as pd # noqa
import PIL.Image # noqa
import seaborn as sns # noqa
from jupyter_client import BlockingKernelClient # noqa
from sympy import Eq, solve, symbols # noqa
except ImportError as e:
raise ImportError(
'The dependencies for Code Interpreter support are not installed. '
'Please install the required dependencies by running: pip install "qwen-agent[code_interpreter]"') from e
def _fix_matplotlib_cjk_font_issue():
import matplotlib
ttf_name = os.path.basename(ALIB_FONT_FILE)
local_ttf = os.path.join(os.path.abspath(os.path.join(matplotlib.matplotlib_fname(), os.path.pardir)), 'fonts',
'ttf', ttf_name)
if not os.path.exists(local_ttf):
try:
shutil.copy(ALIB_FONT_FILE, local_ttf)
font_list_cache = os.path.join(matplotlib.get_cachedir(), 'fontlist-*.json')
for cache_file in glob.glob(font_list_cache):
with open(cache_file) as fin:
cache_content = fin.read()
if ttf_name not in cache_content:
os.remove(cache_file)
except Exception:
print_traceback()
def _escape_ansi(line: str) -> str:
ansi_escape = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
return ansi_escape.sub('', line)
#
# The _BasePolicy and AnyThreadEventLoopPolicy below are borrowed from Tornado.
# Ref: https://www.tornadoweb.org/en/stable/_modules/tornado/platform/asyncio.html#AnyThreadEventLoopPolicy
#
if sys.platform == 'win32' and hasattr(asyncio, 'WindowsSelectorEventLoopPolicy'):
_BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore
else:
_BasePolicy = asyncio.DefaultEventLoopPolicy
class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore
"""Event loop policy that allows loop creation on any thread.
The default `asyncio` event loop policy only automatically creates
event loops in the main threads. Other threads must create event
loops explicitly or `asyncio.get_event_loop` (and therefore
`.IOLoop.current`) will fail. Installing this policy allows event
loops to be created automatically on any thread.
Usage::
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
"""
def get_event_loop(self) -> asyncio.AbstractEventLoop:
try:
return super().get_event_loop()
except RuntimeError:
# "There is no current event loop in thread %r"
loop = self.new_event_loop()
self.set_event_loop(loop)
return loop
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/tools/python_executor.py
================================================
import copy
import datetime
import io
import os
import pickle
import traceback
from concurrent.futures import TimeoutError
from contextlib import redirect_stdout
from functools import partial
from typing import Any, Dict, List, Optional, Union
from multiprocessing import Pool, cpu_count
import json5
import regex
from tqdm import tqdm
from qwen_agent.tools.base import BaseTool
from qwen_agent.utils.utils import extract_code
class GenericRuntime:
GLOBAL_DICT = {}
LOCAL_DICT = None
HEADERS = []
def __init__(self):
self._global_vars = copy.copy(self.GLOBAL_DICT)
self._local_vars = copy.copy(self.LOCAL_DICT) if self.LOCAL_DICT else None
for c in self.HEADERS:
self.exec_code(c)
def exec_code(self, code_piece: str) -> None:
if regex.search(r'(\s|^)?input\(', code_piece) or regex.search(r'(\s|^)?os.system\(', code_piece):
raise RuntimeError()
exec(code_piece, self._global_vars)
def eval_code(self, expr: str) -> Any:
return eval(expr, self._global_vars)
def inject(self, var_dict: Dict[str, Any]) -> None:
for k, v in var_dict.items():
self._global_vars[k] = v
@property
def answer(self):
return self._global_vars['answer']
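# Illustrative usage (not in the original file): a runtime keeps state across
# exec/eval calls because both operate on the same `_global_vars` dict.
# >>> runtime = GenericRuntime()
# >>> runtime.exec_code('x = 21 * 2')
# >>> runtime.eval_code('x')
# 42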
class DateRuntime(GenericRuntime):
import dateutil.relativedelta
GLOBAL_DICT = {
'datetime': datetime.datetime,
'timedelta': dateutil.relativedelta.relativedelta,
'relativedelta': dateutil.relativedelta.relativedelta
}
class CustomDict(dict):
def __iter__(self):
return list(super().__iter__()).__iter__()
class ColorObjectRuntime(GenericRuntime):
GLOBAL_DICT = {'dict': CustomDict}
def _check_deps_for_python_executor():
try:
import dateutil.relativedelta # noqa
import multiprocess # noqa
from multiprocess import Pool # noqa
from pebble import ProcessPool # noqa
from timeout_decorator import timeout # noqa
except ImportError as e:
raise ImportError(
'The dependencies for Python Executor support are not installed. '
'Please install the required dependencies by running: pip install "qwen-agent[python_executor]"') from e
# @register_tool('python_executor') # Do not register this tool by default because it is dangerous.
class PythonExecutor(BaseTool):
name = 'python_executor'
description = 'For executing python code. Not sandboxed. Do not use it for production purposes.'
parameters = [{'name': 'code', 'type': 'string', 'description': 'The code to be executed.', 'required': True}]
def __init__(self, cfg: Optional[Dict] = None):
_check_deps_for_python_executor()
import multiprocess
from multiprocess import Pool
super().__init__(cfg)
runtime: Optional[Any] = self.cfg.get('runtime', None)
get_answer_symbol: Optional[str] = self.cfg.get('get_answer_symbol', None)
get_answer_expr: Optional[str] = self.cfg.get('get_answer_expr', None)
get_answer_from_stdout: bool = self.cfg.get('get_answer_from_stdout', True)
timeout_length: int = self.cfg.get('timeout_length', 20)
self.runtime = runtime if runtime else GenericRuntime()
self.answer_symbol = get_answer_symbol
self.answer_expr = get_answer_expr
self.get_answer_from_stdout = get_answer_from_stdout
self.pool = Pool(multiprocess.cpu_count())
self.timeout_length = timeout_length
def call(self, params: Union[str, dict], **kwargs) -> list:
try:
params = json5.loads(params)
code = params['code']
except Exception:
code = extract_code(params)
if not code.strip():
return ['', '']
predictions = self.apply(code)
return predictions
def apply(self, code: str) -> list:
return self.batch_apply([code])[0]
def process_generation_to_code(self, gens: str):
return [g.split('\n') for g in gens]
@staticmethod
def execute(
code,
get_answer_from_stdout=None,
runtime=None,
answer_symbol=None,
answer_expr=None,
timeout_length=20,
):
from timeout_decorator import timeout
try:
if get_answer_from_stdout:
program_io = io.StringIO()
with redirect_stdout(program_io):
timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
program_io.seek(0)
result = program_io.read()
elif answer_symbol:
timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
result = runtime._global_vars[answer_symbol]
elif answer_expr:
timeout(timeout_length)(runtime.exec_code)('\n'.join(code))
result = timeout(timeout_length)(runtime.eval_code)(answer_expr)
else:
timeout(timeout_length)(runtime.exec_code)('\n'.join(code[:-1]))
result = timeout(timeout_length)(runtime.eval_code)(code[-1])
report = 'Done'
str(result)
pickle.dumps(result) # serialization check
except Exception:
result = ''
report = traceback.format_exc().split('\n')[-2]
return result, report
@staticmethod
def truncate(s, max_length=256):
half = max_length // 2
if len(s) > max_length:
s = s[:half] + '...' + s[-half:]
return s
def batch_apply(self, batch_code: List[str]) -> list:
from pebble import ProcessPool
all_code_snippets = self.process_generation_to_code(batch_code)
timeout_cnt = 0
all_exec_results = []
with ProcessPool(max_workers=min(len(all_code_snippets), os.cpu_count())) as pool:
executor = partial(
self.execute,
get_answer_from_stdout=self.get_answer_from_stdout,
runtime=self.runtime,
answer_symbol=self.answer_symbol,
answer_expr=self.answer_expr,
timeout_length=self.timeout_length, # note: this per-call timeout does not take effect; the pool-level timeout below is what actually applies
)
future = pool.map(executor, all_code_snippets, timeout=self.timeout_length)
iterator = future.result()
if len(all_code_snippets) > 100:
progress_bar = tqdm(total=len(all_code_snippets), desc='Execute')
else:
progress_bar = None
while True:
try:
result = next(iterator)
all_exec_results.append(result)
except StopIteration:
break
except TimeoutError as error:
print(error)
all_exec_results.append(('', 'Timeout Error'))
timeout_cnt += 1
except Exception as error:
print(error)
exit()
if progress_bar is not None:
progress_bar.update(1)
if progress_bar is not None:
progress_bar.close()
batch_results = []
for code, (res, report) in zip(all_code_snippets, all_exec_results):
# post processing
res, report = str(res).strip(), str(report).strip()
res, report = self.truncate(res), self.truncate(report)
batch_results.append((res, report))
return batch_results
def _test():
batch_code = ["""import math
print(math.pi*4)"""]
executor = PythonExecutor()
predictions = executor.apply(batch_code[0])
print(predictions)
if __name__ == '__main__':
_test()
================================================
FILE: verl/workers/rollout/vllm_rollout/qwen_agent/utils/utils.py
================================================
import base64
import copy
import hashlib
import json
import os
import re
import shutil
import signal
import socket
import sys
import time
import traceback
import urllib.parse
from io import BytesIO
from typing import Any, List, Literal, Optional, Tuple, Union
import json5
import requests
from pydantic import BaseModel
from qwen_agent.llm.schema import ASSISTANT, DEFAULT_SYSTEM_MESSAGE, FUNCTION, SYSTEM, USER, ContentItem, Message
from qwen_agent.log import logger
def append_signal_handler(sig, handler):
"""
Installs a new signal handler while preserving any existing handler.
If an existing handler is present, it will be called _after_ the new handler.
"""
old_handler = signal.getsignal(sig)
if not callable(old_handler):
old_handler = None
if sig == signal.SIGINT:
def old_handler(*args, **kwargs):
raise KeyboardInterrupt
elif sig == signal.SIGTERM:
def old_handler(*args, **kwargs):
raise SystemExit
def new_handler(*args, **kwargs):
handler(*args, **kwargs)
if old_handler is not None:
old_handler(*args, **kwargs)
signal.signal(sig, new_handler)
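# Illustrative usage (not in the original file); `cleanup` is a hypothetical
# callable:
# >>> append_signal_handler(signal.SIGTERM, lambda *args: cleanup())
# On SIGTERM, cleanup() runs first, then the pre-existing handler (or the
# synthesized SystemExit-raising default).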
def get_local_ip() -> str:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
except Exception:
ip = '127.0.0.1'
finally:
s.close()
return ip
def hash_sha256(text: str) -> str:
hash_object = hashlib.sha256(text.encode())
key = hash_object.hexdigest()
return key
def print_traceback(is_error: bool = True):
tb = ''.join(traceback.format_exception(*sys.exc_info(), limit=3))
if is_error:
logger.error(tb)
else:
logger.warning(tb)
CHINESE_CHAR_RE = re.compile(r'[\u4e00-\u9fff]')
def has_chinese_chars(data: Any) -> bool:
text = f'{data}'
return bool(CHINESE_CHAR_RE.search(text))
def has_chinese_messages(messages: List[Union[Message, dict]], check_roles: Tuple[str, ...] = (SYSTEM, USER)) -> bool:
for m in messages:
if m['role'] in check_roles:
if has_chinese_chars(m['content']):
return True
return False
def get_basename_from_url(path_or_url: str) -> str:
if re.match(r'^[A-Za-z]:\\', path_or_url):
# "C:\\a\\b\\c" -> "C:/a/b/c"
path_or_url = path_or_url.replace('\\', '/')
# "/mnt/a/b/c" -> "c"
# "https://github.com/here?k=v" -> "here"
# "https://github.com/" -> ""
basename = urllib.parse.urlparse(path_or_url).path
basename = os.path.basename(basename)
basename = urllib.parse.unquote(basename)
basename = basename.strip()
# "https://github.com/" -> "" -> "github.com"
if not basename:
basename = [x.strip() for x in path_or_url.split('/') if x.strip()][-1]
return basename
def is_http_url(path_or_url: str) -> bool:
if path_or_url.startswith('https://') or path_or_url.startswith('http://'):
return True
return False
def is_image(path_or_url: str) -> bool:
filename = get_basename_from_url(path_or_url).lower()
for ext in ['jpg', 'jpeg', 'png', 'webp']:
if filename.endswith(ext):
return True
return False
def sanitize_chrome_file_path(file_path: str) -> str:
if os.path.exists(file_path):
return file_path
# Dealing with "file:///...":
new_path = urllib.parse.urlparse(file_path)
new_path = urllib.parse.unquote(new_path.path)
new_path = sanitize_windows_file_path(new_path)
if os.path.exists(new_path):
return new_path
return sanitize_windows_file_path(file_path)
def sanitize_windows_file_path(file_path: str) -> str:
# For Linux and macOS.
if os.path.exists(file_path):
return file_path
# For native Windows, drop the leading '/' in '/C:/'
win_path = file_path
if win_path.startswith('/'):
win_path = win_path[1:]
if os.path.exists(win_path):
return win_path
# For Windows + WSL.
if re.match(r'^[A-Za-z]:/', win_path):
wsl_path = f'/mnt/{win_path[0].lower()}/{win_path[3:]}'
if os.path.exists(wsl_path):
return wsl_path
# For native Windows, replace / with \.
win_path = win_path.replace('/', '\\')
if os.path.exists(win_path):
return win_path
return file_path
def save_url_to_local_work_dir(url: str, save_dir: str, save_filename: str = '') -> str:
if not save_filename:
save_filename = get_basename_from_url(url)
new_path = os.path.join(save_dir, save_filename)
if os.path.exists(new_path):
os.remove(new_path)
logger.info(f'Downloading {url} to {new_path}...')
start_time = time.time()
if not is_http_url(url):
url = sanitize_chrome_file_path(url)
shutil.copy(url, new_path)
else:
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
with open(new_path, 'wb') as file:
file.write(response.content)
else:
raise ValueError('Cannot download this file. Please check your network or the file link.')
end_time = time.time()
logger.info(f'Finished downloading {url} to {new_path}. Time spent: {end_time - start_time} seconds.')
return new_path
def save_text_to_file(path: str, text: str) -> None:
with open(path, 'w', encoding='utf-8') as fp:
fp.write(text)
def read_text_from_file(path: str) -> str:
try:
with open(path, 'r', encoding='utf-8') as file:
file_content = file.read()
except UnicodeDecodeError:
print_traceback(is_error=False)
from charset_normalizer import from_path
results = from_path(path)
file_content = str(results.best())
return file_content
def contains_html_tags(text: str) -> bool:
pattern = r'<(p|span|div|li|html|script)[^>]*?'
return bool(re.search(pattern, text))
def get_content_type_by_head_request(path: str) -> str:
try:
response = requests.head(path, timeout=5)
content_type = response.headers.get('Content-Type', '')
return content_type
except requests.RequestException:
return 'unk'
def get_file_type(path: str) -> Literal['pdf', 'docx', 'pptx', 'txt', 'html', 'csv', 'tsv', 'xlsx', 'xls', 'unk']:
f_type = get_basename_from_url(path).split('.')[-1].lower()
if f_type in ['pdf', 'docx', 'pptx', 'csv', 'tsv', 'xlsx', 'xls']:
# Specially supported file types
return f_type
if is_http_url(path):
# The HTTP headers are obtained by making a HEAD request to the target URL;
# the Content-Type field usually indicates the type of content returned
content_type = get_content_type_by_head_request(path)
if 'application/pdf' in content_type:
return 'pdf'
elif 'application/msword' in content_type:
return 'docx'
# Default to HTML for URLs,
# because the content downloaded by the request may contain HTML tags
return 'html'
else:
# Determine by reading local HTML file
try:
content = read_text_from_file(path)
except Exception:
print_traceback()
return 'unk'
if contains_html_tags(content):
return 'html'
else:
return 'txt'
def extract_urls(text: str) -> List[str]:
pattern = re.compile(r'https?://\S+')
urls = re.findall(pattern, text)
return urls
def extract_markdown_urls(md_text: str) -> List[str]:
pattern = r'!?\[[^\]]*\]\(([^\)]+)\)'
urls = re.findall(pattern, md_text)
return urls
def extract_code(text: str) -> str:
# Match triple backtick blocks first
triple_match = re.search(r'```[^\n]*\n(.+?)```', text, re.DOTALL)
if triple_match:
text = triple_match.group(1)
else:
try:
text = json5.loads(text)['code']
except Exception:
print_traceback(is_error=False)
# If no code blocks found, return original text
return text
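# Illustrative examples (not in the original file): a fenced block wins over
# the JSON fallback, and plain text is returned unchanged.
# >>> extract_code('```py\nx = 1\n```')
# 'x = 1\n'
# >>> extract_code('{"code": "x = 1"}')
# 'x = 1'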
def json_loads(text: str) -> dict:
text = text.strip('\n')
if text.startswith('```') and text.endswith('\n```'):
text = '\n'.join(text.split('\n')[1:-1])
try:
return json.loads(text)
except json.decoder.JSONDecodeError as json_err:
try:
return json5.loads(text)
except ValueError:
raise json_err
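# Illustrative example (not in the original file): code fences around a JSON
# payload are stripped before parsing, with json5 as the lenient fallback.
# >>> json_loads('```json\n{"a": 1}\n```')
# {'a': 1}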
class PydanticJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, BaseModel):
return obj.model_dump()
return super().default(obj)
def json_dumps_pretty(obj: dict, ensure_ascii=False, indent=2, **kwargs) -> str:
return json.dumps(obj, ensure_ascii=ensure_ascii, indent=indent, cls=PydanticJSONEncoder, **kwargs)
def json_dumps_compact(obj: dict, ensure_ascii=False, indent=None, **kwargs) -> str:
return json.dumps(obj, ensure_ascii=ensure_ascii, indent=indent, cls=PydanticJSONEncoder, **kwargs)
def format_as_multimodal_message(
msg: Message,
add_upload_info: bool,
add_multimodel_upload_info: bool,
lang: Literal['auto', 'en', 'zh'] = 'auto',
) -> Message:
assert msg.role in (USER, ASSISTANT, SYSTEM, FUNCTION)
content: List[ContentItem] = []
if isinstance(msg.content, str): # if text content
if msg.content:
content = [ContentItem(text=msg.content)]
elif isinstance(msg.content, list): # if multimodal content
files = []
for item in msg.content:
k, v = item.get_type_and_value()
if k in ('text', 'image', 'audio', 'video'):
content.append(item)
if k == 'file':
# Move 'file' out of 'content' since it's not natively supported by models
files.append(v)
if add_multimodel_upload_info and k == 'image':
# Indicate the image name
# Not considering audio and video for now
files.append(v)
if add_upload_info and files and (msg.role in (SYSTEM, USER)):
if lang == 'auto':
has_zh = has_chinese_chars(msg)
else:
has_zh = (lang == 'zh')
upload = []
for f in [get_basename_from_url(f) for f in files]:
if is_image(f):
if has_zh:
upload.append(f'![图片]({f})')
else:
upload.append(f'![image]({f})')
else:
if has_zh:
upload.append(f'[文件]({f})')
else:
upload.append(f'[file]({f})')
upload = ' '.join(upload)
if has_zh:
upload = f'(上传了 {upload})\n\n'
else:
upload = f'(Uploaded {upload})\n\n'
# Check and avoid adding duplicate upload info
upload_info_already_added = False
for item in content:
if item.text and (upload in item.text):
upload_info_already_added = True
if not upload_info_already_added:
content = [ContentItem(text=upload)] + content
else:
raise TypeError
msg = Message(role=msg.role,
content=content,
name=msg.name if msg.role == FUNCTION else None,
function_call=msg.function_call,
extra=msg.extra)
return msg
def format_as_text_message(
msg: Message,
add_upload_info: bool,
lang: Literal['auto', 'en', 'zh'] = 'auto',
) -> Message:
msg = format_as_multimodal_message(msg,
add_upload_info=add_upload_info,
add_multimodel_upload_info=add_upload_info,
lang=lang)
text = ''
for item in msg.content:
if item.type == 'text':
text += item.value
msg.content = text
return msg
def extract_text_from_message(
msg: Message,
add_upload_info: bool,
lang: Literal['auto', 'en', 'zh'] = 'auto',
) -> str:
if isinstance(msg.content, list):
text = format_as_text_message(msg, add_upload_info=add_upload_info, lang=lang).content
elif isinstance(msg.content, str):
text = msg.content
else:
raise TypeError(f'List of str or str expected, but received {type(msg.content).__name__}.')
return text.strip()
def extract_files_from_messages(messages: List[Message], include_images: bool) -> List[str]:
files = []
for msg in messages:
if isinstance(msg.content, list):
for item in msg.content:
if item.file and item.file not in files:
files.append(item.file)
if include_images and item.image and item.image not in files:
files.append(item.image)
return files
def merge_generate_cfgs(base_generate_cfg: Optional[dict], new_generate_cfg: Optional[dict]) -> dict:
generate_cfg: dict = copy.deepcopy(base_generate_cfg or {})
if new_generate_cfg:
for k, v in new_generate_cfg.items():
if k == 'stop':
stop = generate_cfg.get('stop', [])
stop = stop + [s for s in v if s not in stop]
generate_cfg['stop'] = stop
else:
generate_cfg[k] = v
return generate_cfg
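# Illustrative example (not in the original file): 'stop' lists are unioned,
# while every other key from the new config overrides the base config.
# >>> merge_generate_cfgs({'stop': ['</s>'], 'top_p': 0.8},
# ...                     {'stop': ['<|im_end|>'], 'top_p': 0.9})
# {'stop': ['</s>', '<|im_end|>'], 'top_p': 0.9}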
def build_text_completion_prompt(
messages: List[Message],
allow_special: bool = False,
default_system: str = DEFAULT_SYSTEM_MESSAGE,
) -> str:
im_start = '<|im_start|>'
im_end = '<|im_end|>'
if messages[0].role == SYSTEM:
sys = messages[0].content
assert isinstance(sys, str)
prompt = f'{im_start}{SYSTEM}\n{sys}{im_end}'
messages = messages[1:]
else:
prompt = f'{im_start}{SYSTEM}\n{default_system}{im_end}'
# Make sure we are completing the chat in the tone of the assistant
if messages[-1].role != ASSISTANT:
messages = messages + [Message(ASSISTANT, '')]
for msg in messages:
assert isinstance(msg.content, str)
content = msg.content
if allow_special:
assert msg.role in (USER, ASSISTANT, SYSTEM, FUNCTION)
if msg.function_call:
assert msg.role == ASSISTANT
tool_call = msg.function_call.arguments
try:
tool_call = {'name': msg.function_call.name, 'arguments': json.loads(tool_call)}
tool_call = json.dumps(tool_call, ensure_ascii=False, indent=2)
except json.decoder.JSONDecodeError:
tool_call = '{"name": "' + msg.function_call.name + '", "arguments": ' + tool_call + '}'
if content:
content += '\n'
content += f'\n{tool_call}\n'
else:
assert msg.role in (USER, ASSISTANT)
assert msg.function_call is None
prompt += f'\n{im_start}{msg.role}\n{content}{im_end}'
assert prompt.endswith(im_end)
prompt = prompt[:-len(im_end)]
return prompt
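# Illustrative example (not in the original file), assuming DEFAULT_SYSTEM_MESSAGE
# is 'You are a helpful assistant.': a single user turn is rendered in ChatML and
# the prompt is left open after '<|im_start|>assistant\n' so the model completes
# in the assistant's voice.
# >>> build_text_completion_prompt([Message(USER, 'Hi')])
# '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n'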
def encode_image_as_base64(path: str, max_short_side_length: int = -1) -> str:
from PIL import Image
image = Image.open(path)
if (max_short_side_length > 0) and (min(image.size) > max_short_side_length):
ori_size = image.size
image = resize_image(image, short_side_length=max_short_side_length)
logger.debug(f'Image "{path}" resized from {ori_size} to {image.size}.')
image = image.convert(mode='RGB')
buffered = BytesIO()
image.save(buffered, format='JPEG')
return 'data:image/jpeg;base64,' + base64.b64encode(buffered.getvalue()).decode('utf-8')
def load_image_from_base64(image_base64: Union[bytes, str]):
from PIL import Image
image = Image.open(BytesIO(base64.b64decode(image_base64)))
image.load()
return image
def resize_image(img, short_side_length: int = 1080):
from PIL import Image
assert isinstance(img, Image.Image)
width, height = img.size
if width <= height:
new_width = short_side_length
new_height = int((short_side_length / width) * height)
else:
new_height = short_side_length
new_width = int((short_side_length / height) * width)
resized_img = img.resize((new_width, new_height), resample=Image.Resampling.BILINEAR)
return resized_img
def get_last_usr_msg_idx(messages: List[Union[dict, Message]]) -> int:
i = len(messages) - 1
while (i >= 0) and (messages[i]['role'] != 'user'):
i -= 1
assert i >= 0, messages
assert messages[i]['role'] == 'user'
return i
================================================
FILE: verl/workers/rollout/vllm_rollout/vllm_rollout.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vllm_rollout that can be applied in different backend
When working with FSDP:
- Use DTensor weight loader (recommended) or HF weight loader
- Utilize state_dict from the FSDP to synchronize the weights among tp ranks in vLLM
When working with Megatron:
- Use Megatron weight loader
- During training, only the current pp stage holds the parameters
- Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks hold all the parameters)
- Bind the parameters to the inference engine
- Do inference in tp. pp is treated as additional dp
- After inference, all the parameters that don't belong to this pp rank are freed.
"""
from typing import List
from contextlib import contextmanager
from omegaconf import DictConfig
import torch
import torch.distributed
from tensordict import TensorDict
from torch import nn
from verl import DataProto
from verl.utils.torch_functional import get_eos_mask, pad_sequence_to_length
from verl.workers.rollout.base import BaseRollout
from verl.third_party.vllm import LLM, vllm_version
from verl.third_party.vllm import parallel_state as vllm_ps
from vllm import SamplingParams
# TODO
# 1. support pp in vllm
# 2. passing tokenizer is not necessary? no encoding/decoding is happening here
# 3. simplify init logic
# NOTE(sgm): added for verl. We can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor) -> List[int]:
# remove the left padding in the prompt token_id
# pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
token_ids = prompt_token_ids[non_pad_index:].tolist()
return token_ids
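# Illustrative example (not in the original file), with pad_token_id == 0:
# >>> _pre_process_inputs(0, torch.tensor([0, 0, 15, 42]))
# [15, 42]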
class vLLMRollout(BaseRollout):
def __init__(self, actor_module: nn.Module, config: DictConfig, tokenizer, model_hf_config, **kwargs):
"""A vLLM rollout. It requires the module is supported by the vllm.
Args:
module: module here follows huggingface APIs
config: DictConfig
tokenizer: the task/model tokenizer
model_hf_config: the huggingface config to initialize the generating model in vllm
**kwargs: train_tp, for Megatron Backend to initialize hybrid engine (zero redundancy) process group
"""
super().__init__()
self.config = config
assert not (not config.enforce_eager and config.free_cache_engine), \
"disable CUDA graph (enforce_eager = False) if free cache engine"
tensor_parallel_size = self.config.get('tensor_model_parallel_size', 1)
assert tensor_parallel_size <= torch.distributed.get_world_size(), \
"tensor parallel size should be less than or equal to the world size"
max_num_batched_tokens = int(self.config.get('max_num_batched_tokens', 8192))
if kwargs.get('train_tp', None) is not None:
# deployed with megatron
import os
os.environ['CUDA_TIMER_STREAM_KAFKA_ENABLE'] = '0'
os.environ['MEGATRON_IMPORT_TIMERS'] = '0'
train_tp = kwargs.get('train_tp', None)
num_tp_per_train_tp = train_tp // tensor_parallel_size
if vllm_version in ('0.4.2', '0.5.4', '0.6.3'):
vllm_ps.initialize_parallel_state(tensor_model_parallel_size=tensor_parallel_size,
num_tp_per_train_tp=num_tp_per_train_tp)
assert model_hf_config.max_position_embeddings >= config.prompt_length + config.response_length, \
"model context length should be greater than total sequence length"
max_model_len = self.config.max_model_len if self.config.max_model_len \
else config.prompt_length + config.response_length
max_model_len = int(max_model_len)
if max_num_batched_tokens < max_model_len and self.config.enable_chunked_prefill:
raise ValueError('Chunked prefill is enabled but max_num_batched_tokens is smaller than max_model_len; \
please increase max_num_batched_tokens or disable chunked prefill')
self.inference_engine = LLM(
actor_module,
tokenizer=tokenizer,
model_hf_config=model_hf_config,
tensor_parallel_size=tensor_parallel_size,
dtype=config.dtype,
enforce_eager=config.enforce_eager,
gpu_memory_utilization=config.gpu_memory_utilization,
skip_tokenizer_init=False,
max_model_len=max_model_len,
load_format=config.load_format,
disable_log_stats=config.disable_log_stats,
max_num_batched_tokens=max_num_batched_tokens,
enable_chunked_prefill=config.enable_chunked_prefill,
)
# Offload vllm model to reduce peak memory usage
self.inference_engine.offload_model_weights()
kwargs = dict(
n=1,
logprobs=1, # can be set to 0 and let the actor recompute
max_tokens=config.response_length,
)
# we may detokenize the result all together later
if vllm_version in ('0.4.2', '0.5.4', '0.6.3'):
kwargs['detokenize'] = False
# support adding any sampling params from the config file
for k in config.keys():
if hasattr(SamplingParams(), str(k)):
kwargs[k] = config.get(k)
print(f"kwargs: {kwargs}")
self.sampling_params = SamplingParams(**kwargs)
self.pad_token_id = tokenizer.pad_token_id
@contextmanager
def update_sampling_params(self, **kwargs):
# update sampling params
old_sampling_params_args = {}
if kwargs:
for key, value in kwargs.items():
if hasattr(self.sampling_params, key):
old_value = getattr(self.sampling_params, key)
old_sampling_params_args[key] = old_value
setattr(self.sampling_params, key, value)
yield
# roll back to previous sampling params
# if len(old_sampling_params_args):
for key, value in old_sampling_params_args.items():
setattr(self.sampling_params, key, value)
@torch.no_grad()
def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto:
# rebuild vllm cache engine
if self.config.free_cache_engine:
self.inference_engine.init_cache_engine()
idx = prompts.batch['input_ids'] # (bs, prompt_length)
# left-padded attention_mask
attention_mask = prompts.batch['attention_mask']
position_ids = prompts.batch['position_ids']
# used to construct attention_mask
eos_token_id = prompts.meta_info['eos_token_id']
batch_size = idx.size(0)
idx_list = []
# parse idx from torch.Tensor to List[List[int]]
for i in range(batch_size):
idx_list.append(_pre_process_inputs(self.pad_token_id, idx[i]))
do_sample = prompts.meta_info.get('do_sample', True)
if not do_sample:
kwargs = {
'best_of': 1,
'top_p': 1.0,
'top_k': -1,
'min_p': 0.0,
'temperature': 0,
'n': 1 # if greedy, only 1 response
}
# users can customize different sampling_params at different runs
with self.update_sampling_params(**kwargs):
output = self.inference_engine.generate(
prompts=None, # because we have already converted it to prompt token ids
sampling_params=self.sampling_params,
prompt_token_ids=idx_list,
use_tqdm=False)
# TODO(sgm): disable logprob when recompute_log_prob is enabled
# if n = 1: (bs, response_length) ; if n > 1: (bs * n, response_length)
response = output[0].to(idx.device)
log_probs = output[1].to(idx.device)
if response.shape[1] < self.config.response_length:
response = pad_sequence_to_length(response, self.config.response_length, self.pad_token_id)
log_probs = pad_sequence_to_length(log_probs, self.config.response_length, self.pad_token_id)
if self.config.n > 1 and do_sample:
idx = idx.repeat_interleave(self.config.n, dim=0)
attention_mask = attention_mask.repeat_interleave(self.config.n, dim=0)
position_ids = position_ids.repeat_interleave(self.config.n, dim=0)
batch_size = batch_size * self.config.n
seq = torch.cat([idx, response], dim=-1)
response_length = response.size(1)
delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
delta_position_id = delta_position_id.unsqueeze(0).repeat(batch_size, 1)
# TODO(sgm): fix position_ids on right_pad
# prompt: left pad + response: right pad
# attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0]
# position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11]
response_position_ids = position_ids[:, -1:] + delta_position_id
position_ids = torch.cat([position_ids, response_position_ids], dim=-1)
response_attention_mask = get_eos_mask(response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype)
attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)
# all the tp ranks should contain the same data here. data in all ranks are valid
batch = TensorDict(
{
'prompts': idx,
'responses': response,
'input_ids': seq, # here input_ids becomes the whole sequence (prompt + response)
# 'old_log_probs': log_probs, # we will recompute old log prob with actor
'attention_mask': attention_mask,
'position_ids': position_ids
},
batch_size=batch_size)
# free vllm cache engine
if self.config.free_cache_engine:
self.inference_engine.free_cache_engine()
return DataProto(batch=batch)
================================================
FILE: verl/workers/rollout/vllm_rollout/vllm_rollout_spmd.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The vllm_rollout that can be applied in different backend
When working with FSDP:
- Use DTensor weight loader (recommended) or HF weight loader
- Utilize state_dict from the FSDP to synchronize the weights among tp ranks in vLLM
When working with Megatron:
- Use Megatron weight loader
- During training, only the current pp stage holds the parameters
- Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks hold all the parameters)
- Bind the parameters to the inference engine
- Do inference in tp. pp is treated as additional dp
- After inference, all the parameters that don't belong to this pp rank are freed.
"""
import numpy as np
from typing import List
from contextlib import contextmanager
from omegaconf import DictConfig
import os
import torch
import torch.distributed
from tensordict import TensorDict
import requests
from multiprocessing import Pool
from functools import partial
from torch import nn
from typing import Any, Union
from verl import DataProto
from verl.utils.torch_functional import get_eos_mask, pad_2d_list_to_length
from verl.workers.rollout.base import BaseRollout
from vllm.distributed import parallel_state as vllm_ps
from vllm import LLM, SamplingParams
from verl.third_party.vllm import vllm_version
from qwen_agent.tools.python_executor import PythonExecutor
from qwen_agent.tools.code_interpreter import CodeInterpreter
from qwen_agent.utils.utils import print_traceback
from typing import Tuple
import json5
import pdb
import json
import copy
from concurrent.futures import ThreadPoolExecutor, as_completed, TimeoutError
# TODO
# 1. support pp in vllm
# 2. passing tokenizer is not necessary? no encoding/decoding is happening here
# 3. simplify init logic
# NOTE(sgm): added for verl. We can optimize it by making the dataloader yield List[int] without padding.
def _pre_process_inputs(pad_token_id, prompt_token_ids: torch.Tensor) -> List[int]:
# remove the left padding in the prompt token_id
# pad_token_id = self.llm_engine.tokenizer.pad_token_id if self.llm_engine.tokenizer.pad_token_id is not None else self.llm_engine.tokenizer.eos_token_id
non_pad_index = torch.nonzero(prompt_token_ids != pad_token_id, as_tuple=False)[0][0]
token_ids = prompt_token_ids[non_pad_index:].tolist()
return token_ids
def _repeat_interleave(value: Union[torch.Tensor, np.ndarray], repeats: int) -> Union[torch.Tensor, List[Any]]:
if isinstance(value, torch.Tensor):
return value.repeat_interleave(repeats, dim=0)
else:
return np.repeat(value, repeats, axis=0)
OBS_START = '```output'
OBS_END = '\n```\n'
def extract_program(result: str, last_only=True):
"""
Extract the program after "```python" and before the closing "```".
"""
program = ''
start = False
for line in result.split('\n'):
if line.startswith('```python') or line.endswith('```python'):
if last_only:
program = '' # only extract the last program
else:
program += '\n# ========\n'
start = True
elif line.startswith('```'):
start = False
elif start:
program += line + '\n'
if start:
# the code is incomplete
program = ''
return program
def _detect_tool(text: str) -> Tuple[bool, str, str, str]:
program = extract_program(text)
if program:
program = json.dumps({'code': program}, ensure_ascii=False)
return (program != ''), PythonExecutor.name, program, text
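# Illustrative example (not in the original file): the last fenced python block
# is extracted and re-packed as a JSON tool call for PythonExecutor.
# >>> _detect_tool('Compute it:\n```python\nprint(1 + 1)\n```')
# (True, 'python_executor', '{"code": "print(1 + 1)\\n"}', 'Compute it:\n```python\nprint(1 + 1)\n```')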
def send_request(json_data):
try:
url = 'sandbox_url'
response = requests.post(url, json=json_data, timeout=10)
return response.json() # return the JSON payload of the response
except Exception:
print("sandbox request failed or timed out")
return {"error": "unknown"}
class vLLMRollout(BaseRollout):
def __init__(self, model_path: str, config: DictConfig, tokenizer, model_hf_config, **kwargs):
"""A vLLM rollout. It requires the module is supported by the vllm.
Args:
module: module here follows huggingface APIs
config: DictConfig
tokenizer: the task/model tokenizer
model_hf_config: the huggingface config to initialize the generating model in vllm
**kwargs: train_tp, for Megatron Backend to initialize hybrid engine (zero redundancy) process group
"""
super().__init__()
self.config = config
assert not (not config.enforce_eager and config.free_cache_engine), \
"disable CUDA graph (enforce_eager = False) if free cache engine"
tensor_parallel_size = self.config.get('tensor_model_parallel_size', 1)
assert tensor_parallel_size <= torch.distributed.get_world_size(), \
"tensor parallel size should be less than or equal to the world size"
max_num_batched_tokens = self.config.get('max_num_batched_tokens', 8192)
if kwargs.get('train_tp', None) is not None:
# deployed with megatron
import os
os.environ['CUDA_TIMER_STREAM_KAFKA_ENABLE'] = '0'
os.environ['MEGATRON_IMPORT_TIMERS'] = '0'
train_tp = kwargs.get('train_tp', None)
num_tp_per_train_tp = train_tp // tensor_parallel_size
vllm_ps.initialize_parallel_state(tensor_model_parallel_size=tensor_parallel_size,
num_tp_per_train_tp=num_tp_per_train_tp)
assert model_hf_config.max_position_embeddings >= config.prompt_length + config.response_length, \
"model context length should be greater than total sequence length"
self.inference_engine = LLM(
model=model_path,
enable_sleep_mode=True,
tensor_parallel_size=tensor_parallel_size,
distributed_executor_backend="external_launcher",
dtype=config.dtype,
enforce_eager=config.enforce_eager,
gpu_memory_utilization=config.gpu_memory_utilization,
disable_custom_all_reduce=True,
skip_tokenizer_init=False,
max_model_len=config.prompt_length + config.response_length,
disable_log_stats=config.disable_log_stats,
max_num_batched_tokens=max_num_batched_tokens,
enable_chunked_prefill=config.enable_chunked_prefill,
enable_prefix_caching=True,
)
# Offload vllm model to reduce peak memory usage
self.inference_engine.sleep(level=1)
kwargs = dict(
n=1,
logprobs=0, # set to 0; the actor recomputes the log probs
max_tokens=config.response_length,
)
# we may detokenize the result all together later
if vllm_version != '0.3.1':
kwargs['detokenize'] = False
# support adding any sampling params from the config file
for k in config.keys():
if hasattr(SamplingParams(), str(k)):
kwargs[k] = config.get(k)
print(f"kwargs: {kwargs}")
self.sampling_params = SamplingParams(**kwargs)
self.pad_token_id = tokenizer.pad_token_id
self.tokenizer = tokenizer
self.executor=PythonExecutor()
# self.code_interpreter=CodeInterpreter()
def _get_prompts_and_indices(self, samples_info):
prompts, indices=[], []
for index, info in enumerate(samples_info):
if not info['stop']:
prompts.append(info['sequence'])
indices.append(info['index'])
return prompts, indices
# def code_interpreter_batch_call(self, tool_inputs):
# with Pool(processes=min(len(tool_inputs),os.cpu_count(), 32)) as pool:
# results = pool.map(self.code_interpreter.call, tool_inputs)
# def postproc(result):
# report=result.split("```")[0].strip()
# output=result.split("```")[-1].split("```")[-1].strip()
# if report=="stdout:": report="Done"
# return (output, report)
# results=[postproc(result) for result in results]
# return results
def code_interpreter_batch_call(self, tool_inputs, timeout=20):
tool_inputs=[{'code': tool_input,'language': 'python'} for tool_input in tool_inputs]
results = [None] * len(tool_inputs)
with ThreadPoolExecutor(max_workers=max(min(len(tool_inputs), os.cpu_count(), 64), 1)) as executor:
future_to_index = {executor.submit(send_request, input): i for i, input in enumerate(tool_inputs)}
for future in as_completed(future_to_index):
index = future_to_index[future]
try:
result = future.result(timeout=timeout)
results[index] = result
except Exception:
results[index] = {"run_result": {"stdout": "Error", "stderr": "TimeoutError"}}
def postproc(output):
try:
if str(output['run_result']['return_code'])=='0' or len(str(output['run_result']['stdout'])) != 0:
return output['run_result']['stdout'], "Done"
else:
return output['run_result']['stdout'], output['run_result']['stderr'].strip()
except Exception:
return "Error", "UnknownError"
results=[postproc(result) for result in results]
return results
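# Note (added for clarity): the sandbox endpoint is assumed to return JSON of
# the form {"run_result": {"return_code": 0, "stdout": "...", "stderr": "..."}}.
# postproc maps a zero return code (or non-empty stdout) to (stdout, 'Done'),
# mirroring the (output, report) pairs produced by PythonExecutor.batch_apply.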
def _tokenize_and_find_mask_token_indices(self, sample_info):
response=sample_info['response']
mask_str_ranges=sample_info['mask_info']
encoding=self.tokenizer(response, add_special_tokens=False, return_offsets_mapping=True)
response_token_ids=encoding['input_ids']
offset_mapping_tensor=torch.tensor(encoding['offset_mapping'], dtype=torch.long)
token_starts = offset_mapping_tensor[:,0]
token_ends = offset_mapping_tensor[:,1]
mask_tensor=torch.ones(len(response_token_ids))
for mask_str_range in mask_str_ranges:
start_index, end_index=mask_str_range[0], mask_str_range[1]
mask = (token_starts < end_index) & (token_ends > start_index) & (token_starts >= start_index)
mask_tensor[mask]=0
return response_token_ids, mask_tensor
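# Illustrative sketch (added for clarity): each entry of 'mask_info' is a
# character range [start, end) covering sandbox output inside the response,
# i.e. the text after '```output' up to the closing fence. Tokens whose
# offset-mapping span starts inside that range get mask value 0, so tool
# output is excluded from the loss while model-written tokens keep mask 1.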
def _tir_generate(self, prompts=None, sampling_params=None, prompt_token_ids=None, use_tqdm=False):
sampling_params=copy.deepcopy(sampling_params)
# prompts=self.tokenizer.batch_decode(prompt_token_ids, skip_special_tokens=True)
prompts=[self.tokenizer.decode(prompt['prompt_token_ids'], skip_special_tokens=False) for prompt in prompts]
prompts=[prompt for prompt in prompts for _ in range(sampling_params.n) ]
sampling_params.n=1
sampling_params.detokenize=True
sampling_params.stop=["```output"]
samples_info=[{"prompt": prompt, "sequence": prompt, "response": "", "stop": False, "finish_reason": None,"index": index, "mask_info": [], "execution_pass": 0} for index, prompt in enumerate(prompts)]
program2output=[]
num_llm_calls_available=copy.deepcopy(self.config.num_llm_calls_available)
while num_llm_calls_available >= 0:
if num_llm_calls_available==0: sampling_params.stop=None
num_llm_calls_available-=1
# llm generate response, stop at eos token or ```output
input_prompts, indices=self._get_prompts_and_indices(samples_info)
input_prompts = [{
'prompt_token_ids': self.tokenizer.encode(x, add_special_tokens=False)[:self.config.prompt_length+self.config.response_length]} for x in input_prompts]
outputs = self.inference_engine.generate(prompts=input_prompts, sampling_params=sampling_params, use_tqdm=use_tqdm)
sorted_outputs = sorted(outputs, key=lambda output: int(output.request_id))
responses=[x.outputs[0].text for x in sorted_outputs]
finish_reason=[x.outputs[0].finish_reason for x in sorted_outputs]
stop_reason=[x.outputs[0].stop_reason for x in sorted_outputs]
if num_llm_calls_available==-1:
for i, index in enumerate(indices):
samples_info[index]['response']+=responses[i]
samples_info[index]['sequence']+=responses[i]
samples_info[index]['stop']=True
samples_info[index]['finish_reason']=finish_reason[i]
break
def _python_execution(finish_reason, stop_reason):
if finish_reason=='stop' and stop_reason is None: return False
if finish_reason=='stop' and stop_reason=='```output': return True
if finish_reason=='length': return False
return False
is_execution=[_python_execution(finish_reason[i], stop_reason[i]) for i in range(len(finish_reason))]
# check if all samples are finished
if all([not x for x in is_execution]): break
# prepare for python execution
tool_infos=[ _detect_tool(response) for response in responses]
tool_indices=[]
tool_inputs=[]
for i, tool_info in enumerate(tool_infos):
if tool_info[0] and is_execution[i]:
tool_indices.append(i)
tool_inputs.append(tool_info[2])
def postproc_observation(observation):
execution_pass=0
try:
observation_list=observation
if observation_list[-1] == 'Done':
observation = observation_list[0]
execution_pass=1
else:
observation = observation_list[-1]
except Exception:
observation="Error"
if "Error" in observation: observation=observation.strip().split("\n")[-1]
if len(observation.strip())==0: observation="timeout_decorator.timeout_decorator.TimeoutError: 'Timed Out'"
observation = observation.strip()
if len(observation)>=256:
observation = observation[:128]+"..."+observation[-128:]
observation = f'{OBS_START}\n{observation}{OBS_END}'
return observation, execution_pass
# execute python code
# observations=self.executor.batch_apply([json5.loads(x)['code'] for x in tool_inputs])
observations=self.code_interpreter_batch_call([json5.loads(x)['code'] for x in tool_inputs])
# construction responses from observations
responses=[response+"\n" if not response.endswith('\n') else response for response in responses]
responses_w_res=copy.deepcopy(responses)
execution_passes=[0 for _ in range(len(responses))]
for i, index in enumerate(tool_indices):
processed_observation=postproc_observation(observations[i])
responses_w_res[index]+=processed_observation[0]
execution_passes[index]=processed_observation[1]
# program2output.append([{"code": tool_input, "answer": postproc_observation(observations[idx])} for idx, tool_input in enumerate(tool_inputs)])
# update samples_info
for i, index in enumerate(indices):
mask=[ len(responses[i]) + len('```output'), len(responses_w_res[i]) ]
samples_info[index]['mask_info'].append(mask)
samples_info[index]['response']+=responses_w_res[i]
samples_info[index]['sequence']+=responses_w_res[i]
samples_info[index]['stop']=not is_execution[i]
samples_info[index]['finish_reason']=finish_reason[i]
samples_info[index]['execution_pass']=execution_passes[i]
for sample_info in samples_info:
if sample_info['finish_reason']!='length': sample_info['response']+=self.tokenizer.eos_token
responses_ids=[]
tool_output_masks=[]
execution_passes=[]
for idx, sample_info in enumerate(samples_info):
response_id, tool_output_mask = self._tokenize_and_find_mask_token_indices(sample_info)
responses_ids.append(response_id[:self.config.response_length])
tool_output_masks.append(tool_output_mask[:self.config.response_length])
execution_passes.append(sample_info['execution_pass'])
# save id and mask to check correctness
# samples_info[idx]['responses_id']=response_id[:self.config.response_length]
# samples_info[idx]['tool_output_mask']=tool_output_mask[:self.config.response_length].tolist()
# with open("/mnt/bn/seedllm3-lixuefeng-2/code/o1/verl-tir/sample_infos.json", 'w', encoding='utf-8') as f:
# json.dump(samples_info, f, ensure_ascii=False, indent=2)
return responses_ids, tool_output_masks, torch.tensor(execution_passes, dtype=torch.long)
@contextmanager
def update_sampling_params(self, **kwargs):
# update sampling params
old_sampling_params_args = {}
if kwargs:
for key, value in kwargs.items():
if hasattr(self.sampling_params, key):
old_value = getattr(self.sampling_params, key)
old_sampling_params_args[key] = old_value
setattr(self.sampling_params, key, value)
yield
# roll back to previous sampling params
# if len(old_sampling_params_args):
for key, value in old_sampling_params_args.items():
setattr(self.sampling_params, key, value)
@torch.no_grad()
def generate_sequences(self, prompts: DataProto, **kwargs) -> DataProto:
# rebuild vllm cache engine
if vllm_version in ('0.3.1', '0.4.2', '0.5.4', '0.6.3') and self.config.free_cache_engine:
self.inference_engine.init_cache_engine()
idx = prompts.batch['input_ids'] # (bs, prompt_length)
# left-padded attention_mask
attention_mask = prompts.batch['attention_mask']
position_ids = prompts.batch['position_ids']
# used to construct attention_mask
eos_token_id = prompts.meta_info['eos_token_id']
batch_size = idx.size(0)
non_tensor_batch = prompts.non_tensor_batch
if 'raw_prompt_ids' not in non_tensor_batch:
non_tensor_batch['raw_prompt_ids'] = np.array(
[_pre_process_inputs(self.pad_token_id, idx[i]) for i in range(batch_size)], dtype=object)
if batch_size != len(non_tensor_batch['raw_prompt_ids']):
raise RuntimeError('vllm sharding manager is not working properly.')
if 'multi_modal_data' in non_tensor_batch:
vllm_inputs = []
for raw_prompt_ids, multi_modal_data in zip(non_tensor_batch.pop('raw_prompt_ids'),
non_tensor_batch.pop('multi_modal_data')):
vllm_inputs.append({'prompt_token_ids': raw_prompt_ids, 'multi_modal_data': multi_modal_data})
else:
vllm_inputs = [{
'prompt_token_ids': raw_prompt_ids
} for raw_prompt_ids in non_tensor_batch.pop('raw_prompt_ids')]
do_sample = prompts.meta_info.get('do_sample', True)
if not do_sample:
kwargs = {
'best_of': 1,
'top_p': 1.0,
'top_k': -1,
'min_p': 0.0,
'temperature': 0,
'n': 1 # if greedy, only 1 response
}
# users can customize different sampling_params at different runs
with self.update_sampling_params(**kwargs):
response, tool_output_masks, execution_passes = self._tir_generate(
prompts=vllm_inputs, # because we have already converted it to prompt token ids
sampling_params=self.sampling_params,
use_tqdm=False)
# TODO(sgm): disable logprob when recompute_log_prob is enabled
# if n = 1: (bs, response_length) ; if n > 1: (bs * n, response_length)
# response = []
# for output in outputs:
# for sample_id in range(len(output.outputs)):
# response.append(output.outputs[sample_id].token_ids)
response = pad_2d_list_to_length(response, self.pad_token_id,
max_length=self.config.response_length).to(idx.device)
tool_output_masks = pad_2d_list_to_length(tool_output_masks, 1,
max_length=self.config.response_length).to(idx.device).int()
execution_passes = execution_passes.to(idx.device).int()
if self.config.n > 1 and do_sample:
idx = _repeat_interleave(idx, self.config.n)
attention_mask = _repeat_interleave(attention_mask, self.config.n)
position_ids = _repeat_interleave(position_ids, self.config.n)
batch_size = batch_size * self.config.n
if 'multi_modal_inputs' in non_tensor_batch.keys():
non_tensor_batch['multi_modal_inputs'] = _repeat_interleave(non_tensor_batch['multi_modal_inputs'],
self.config.n)
seq = torch.cat([idx, response], dim=-1)
response_length = response.size(1)
delta_position_id = torch.arange(1, response_length + 1, device=position_ids.device)
delta_position_id = delta_position_id.unsqueeze(0).expand(batch_size, -1)
if position_ids.dim() == 3: # qwen2vl mrope
delta_position_id = delta_position_id.view(batch_size, 1, -1).expand(batch_size, 3, -1)
# TODO(sgm): fix position_ids on right_pad
# prompt: left pad + response: right pad
# attention_mask: [0,0,0,0,1,1,1,1, | 1,1,1,0,0,0,0,0]
# position_ids: [0,0,0,0,0,1,2,3, | 4,5,6,7,8,9,10,11]
response_position_ids = position_ids[:, -1:] + delta_position_id
position_ids = torch.cat([position_ids, response_position_ids], dim=-1)
response_attention_mask = get_eos_mask(response_id=response, eos_token=eos_token_id, dtype=attention_mask.dtype)
# response_attention_mask = response_attention_mask & tool_output_masks
attention_mask = torch.cat((attention_mask, response_attention_mask), dim=-1)
# all the tp ranks should contain the same data here. data in all ranks are valid
batch = TensorDict(
{
'prompts': idx,
'responses': response,
'input_ids': seq, # here input_ids becomes the whole sequence (prompt + response)
# 'old_log_probs': log_probs, # we will recompute old log prob with actor
'attention_mask': attention_mask,
'tool_output_masks': tool_output_masks,
'position_ids': position_ids,
'execution_passes': execution_passes,
},
batch_size=batch_size)
# free vllm cache engine
if vllm_version in ('0.3.1', '0.4.2', '0.5.4', '0.6.3') and self.config.free_cache_engine:
self.inference_engine.free_cache_engine()
return DataProto(batch=batch, non_tensor_batch=non_tensor_batch)
================================================
FILE: verl/workers/sharding_manager/__init__.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from verl.utils.import_utils import is_vllm_available, is_megatron_core_available
from .base import BaseShardingManager
from .fsdp_ulysses import FSDPUlyssesShardingManager
if is_megatron_core_available() and is_vllm_available():
from .megatron_vllm import AllGatherPPModel, MegatronVLLMShardingManager
else:
AllGatherPPModel = None
MegatronVLLMShardingManager = None
if is_vllm_available():
from .fsdp_vllm import FSDPVLLMShardingManager
else:
FSDPVLLMShardingManager = None
================================================
FILE: verl/workers/sharding_manager/base.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sharding manager to implement HybridEngine
"""
from verl import DataProto
class BaseShardingManager:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
def preprocess_data(self, data: DataProto) -> DataProto:
return data
def postprocess_data(self, data: DataProto) -> DataProto:
return data
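# Illustrative usage (not in the original file): concrete managers sync weights
# to the inference engine on __enter__ and release them on __exit__, so rollout
# code typically wraps generation like this (names are hypothetical):
# >>> with sharding_manager:
# ...     batch = sharding_manager.preprocess_data(batch)
# ...     output = rollout.generate_sequences(batch)
# ...     output = sharding_manager.postprocess_data(output)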
================================================
FILE: verl/workers/sharding_manager/fsdp_ulysses.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains a resharding manager that binds weights from FSDP zero3 to XPerfGPT
"""
from .base import BaseShardingManager
from torch.distributed.device_mesh import DeviceMesh
from verl.utils.torch_functional import allgather_dict_tensors
from verl.utils.ulysses import set_ulysses_sequence_parallel_group, get_ulysses_sequence_parallel_group
import numpy as np
import torch
import torch.distributed
from verl import DataProto
class FSDPUlyssesShardingManager(BaseShardingManager):
"""
Sharding manager to support data resharding when using FSDP + Ulysses
"""
def __init__(self, device_mesh: DeviceMesh):
super().__init__()
self.device_mesh = device_mesh
self.seed_offset = 12345
def __enter__(self):
if self.device_mesh is not None:
# We have a global SP group,
# so we switch to the model-specific SP group here
self.prev_sp_group = get_ulysses_sequence_parallel_group()
set_ulysses_sequence_parallel_group(self.device_mesh['sp'].get_group())
# TODO: check how to set seed for each model
def __exit__(self, exc_type, exc_value, traceback):
# restore random states
if self.device_mesh is not None:
# revert to previous sp group
set_ulysses_sequence_parallel_group(self.prev_sp_group)
# TODO: check how to set seed for each model
def preprocess_data(self, data: DataProto) -> DataProto:
"""
AllGather data from sp region
This is because the data is first sharded along the FSDP dimension, as we utilize DP_COMPUTE dispatch.
In Ulysses, we need to make sure the same data is used across an SP group.
"""
if self.device_mesh is not None:
sp_size = self.device_mesh['sp'].size()
group = self.device_mesh['sp'].get_group()
prev_device = data.batch.device
data.batch = data.batch.cuda(device=torch.cuda.current_device())
data.batch = allgather_dict_tensors(data.batch.contiguous(), size=sp_size, group=group, dim=0)
data.batch = data.batch.to(prev_device)
# all gather non_tensor_batch
all_non_tensor_batch = [None for _ in range(sp_size)]
torch.distributed.all_gather_object(all_non_tensor_batch, data.non_tensor_batch, group=group)
data.non_tensor_batch = {
k: np.concatenate([d[k] for d in all_non_tensor_batch]) for k in data.non_tensor_batch
}
return data
def postprocess_data(self, data: DataProto) -> DataProto:
"""
Split the data to follow FSDP partition
"""
if self.device_mesh is not None:
sp_size = self.device_mesh['sp'].size()
sp_rank = self.device_mesh['sp'].get_local_rank()
data = data.chunk(chunks=sp_size)[sp_rank]
return data
================================================
FILE: verl/workers/sharding_manager/fsdp_vllm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import torch
import numpy as np
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.api import ShardingStrategy, ShardedStateDictConfig, StateDictType, FullStateDictConfig
from torch.distributed.device_mesh import DeviceMesh
from verl.third_party.vllm import LLM
from verl.third_party.vllm import parallel_state as vllm_ps
from verl import DataProto
from verl.utils.torch_functional import (broadcast_dict_tensor, allgather_dict_tensors)
from verl.utils.debug import log_gpu_memory_usage
from verl.third_party.vllm import vllm_version
from .base import BaseShardingManager
logger = logging.getLogger(__file__)
logger.setLevel(os.getenv('VERL_PPO_LOGGING_LEVEL', 'WARN'))
class FSDPVLLMShardingManager(BaseShardingManager):
def __init__(self,
module: FSDP,
inference_engine: LLM,
model_config,
full_params: bool = False,
device_mesh: DeviceMesh = None):
self.module = module
self.inference_engine = inference_engine
self.model_config = model_config
self.device_mesh = device_mesh
# Full params
self.full_params = full_params
if full_params:
FSDP.set_state_dict_type(self.module,
state_dict_type=StateDictType.FULL_STATE_DICT,
state_dict_config=FullStateDictConfig())
else:
FSDP.set_state_dict_type(self.module,
state_dict_type=StateDictType.SHARDED_STATE_DICT,
state_dict_config=ShardedStateDictConfig())
# Note that torch_random_states may be different on each dp rank
self.torch_random_states = torch.cuda.get_rng_state()
# get a dedicated rng state for generation
if self.device_mesh is not None:
gen_dp_rank = self.device_mesh['dp'].get_local_rank()
torch.cuda.manual_seed(gen_dp_rank + 1000) # make sure all tp ranks have the same random states
self.gen_random_states = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(self.torch_random_states)
else:
self.gen_random_states = None
def __enter__(self):
log_gpu_memory_usage('Before state_dict() in sharding manager', logger=logger)
params = self.module.state_dict()
log_gpu_memory_usage('After state_dict() in sharding manager', logger=logger)
# Copy, not share memory
load_format = 'hf' if self.full_params else 'dtensor'
if vllm_version in ('0.4.2', '0.5.4', '0.6.3'):
self.inference_engine.sync_model_weights(params, load_format=load_format)
else:
self.inference_engine.wake_up()
# TODO(ZSL): deal with 'hf' format
if load_format == 'dtensor':
from verl.third_party.vllm import load_dtensor_weights
load_dtensor_weights(
params, self.inference_engine.llm_engine.model_executor.driver_worker.worker.model_runner.model)
else:
raise NotImplementedError(f'load_format {load_format} not implemented')
log_gpu_memory_usage('After sync model weights in sharding manager', logger=logger)
del params
torch.cuda.empty_cache()
log_gpu_memory_usage('After del state_dict and empty_cache in sharding manager', logger=logger)
# TODO: offload FSDP model weights
# self.module.cpu()
# torch.cuda.empty_cache()
# if torch.distributed.get_rank() == 0:
# print(f'after model to cpu in sharding manager memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB')
# important: need to manually set the random states of each tp to be identical.
if self.device_mesh is not None:
self.torch_random_states = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(self.gen_random_states)
def __exit__(self, exc_type, exc_value, traceback):
log_gpu_memory_usage('Before vllm offload in sharding manager', logger=logger)
# TODO(ZSL): check this
if vllm_version in ('0.4.2', '0.5.4', '0.6.3'):
self.inference_engine.offload_model_weights()
else:
self.inference_engine.sleep(level=1)
log_gpu_memory_usage('After vllm offload in sharding manager', logger=logger)
# self.module.to('cuda')
# if torch.distributed.get_rank() == 0:
# print(f'after actor module to cuda in sharding manager memory allocated: {torch.cuda.memory_allocated() / 1e9}GB, reserved: {torch.cuda.memory_reserved() / 1e9}GB')
self.module.train()
# add empty cache after each compute
torch.cuda.empty_cache()
# restore random states
if self.device_mesh is not None:
self.gen_random_states = torch.cuda.get_rng_state()
torch.cuda.set_rng_state(self.torch_random_states)
def preprocess_data(self, data: DataProto) -> DataProto:
# TODO: Current impl doesn't consider FSDP with torch micro-dp
tp_size = vllm_ps.get_tensor_model_parallel_world_size()
if vllm_version in ('0.3.1', '0.4.2', '0.5.4', '0.6.3'):
group = vllm_ps.get_tensor_model_parallel_group()
else:
group = vllm_ps.get_tensor_model_parallel_group().device_group
prev_device = data.batch.device
data.batch = data.batch.cuda(device=torch.cuda.current_device())
data.batch = allgather_dict_tensors(data.batch.contiguous(), size=tp_size, group=group, dim=0)
data.batch = data.batch.to(prev_device)
# all gather non_tensor_batch
all_non_tensor_batch = [None for _ in range(tp_size)]
torch.distributed.all_gather_object(all_non_tensor_batch, data.non_tensor_batch, group=group)
data.non_tensor_batch = {k: np.concatenate([d[k] for d in all_non_tensor_batch]) for k in data.non_tensor_batch}
return data
def postprocess_data(self, data: DataProto) -> DataProto:
# TODO: Current impl doesn't consider FSDP with torch micro-dp
local_world_size = vllm_ps.get_tensor_model_parallel_world_size()
src_rank = (torch.distributed.get_rank() // local_world_size) * local_world_size
if vllm_version in ('0.3.1', '0.4.2', '0.5.4', '0.6.3'):
broadcast_dict_tensor(data.batch, src=src_rank, group=vllm_ps.get_tensor_model_parallel_group())
else:
broadcast_dict_tensor(data.batch,
src=src_rank,
group=vllm_ps.get_tensor_model_parallel_group().device_group)
dp_rank = torch.distributed.get_rank()
dp_size = torch.distributed.get_world_size() # not consider torch micro-dp
tp_size = vllm_ps.get_tensor_model_parallel_world_size()
if tp_size > 1:
# TODO: shall we build a micro_dp group when integrating with vLLM?
local_prompts = data.chunk(chunks=tp_size)
data = local_prompts[dp_rank % tp_size]
return data
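# A minimal CPU sketch of the RNG bookkeeping above, assuming torch.random in
# place of torch.cuda so it runs without a GPU: two saved RNG states are
# swapped on enter/exit, so sampling inside the context always resumes the
# generation stream seeded per dp rank, while training randomness outside the
# context is untouched. All names here are illustrative, not part of verl.
if __name__ == "__main__":
    import torch
    torch_random_state = torch.random.get_rng_state()
    torch.manual_seed(1000)  # e.g. gen_dp_rank + 1000, identical across tp ranks
    gen_random_state = torch.random.get_rng_state()
    torch.random.set_rng_state(torch_random_state)
    def enter_generation():
        global torch_random_state
        torch_random_state = torch.random.get_rng_state()
        torch.random.set_rng_state(gen_random_state)
    def exit_generation():
        global gen_random_state
        gen_random_state = torch.random.get_rng_state()
        torch.random.set_rng_state(torch_random_state)
    enter_generation()
    a = torch.rand(1)  # drawn from the generation stream
    exit_generation()
    enter_generation()
    b = torch.rand(1)  # resumes the generation stream where it left off
    exit_generation()
    assert not torch.equal(a, b)  # the generation stream advanced across contexts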
================================================
FILE: verl/workers/sharding_manager/megatron_vllm.py
================================================
# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains a Megatron style Hybrid Engine that shares the weights of the actor with the inference engine.
"""
import importlib
from packaging.version import Version
import torch
import torch.distributed as dist
from torch import nn
from megatron.core import parallel_state as mpu
from megatron.core import DistributedDataParallel as LocalDDP
from megatron.core.transformer.module import Float16Module
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from verl.utils.megatron_utils import get_model, unwrap_model
from verl.utils.memory_buffer import (
build_memory_buffer,
build_memory_reference_from_module,
get_weight_buffer_meta_from_module,
)
class AllGatherPPModel:
def __init__(self, model_provider) -> None:
self._pp_group = mpu.get_pipeline_model_parallel_group()
self._pp_rank = mpu.get_pipeline_model_parallel_rank()
self._pp_size = mpu.get_pipeline_model_parallel_world_size()
self._vpp_size = mpu.get_virtual_pipeline_model_parallel_world_size()
self._model_chunk_size = self._vpp_size or 1
# each one holds a list of model_chunks in this pp stage
self._pp_models = [None] * self.pp_size
rank_list = list(range(self.pp_size))
# make current rank the last one to initialize
rank_list[self.pp_rank], rank_list[-1] = rank_list[-1], rank_list[self.pp_rank]
self._this_rank_models = None
# store the parameter of each pp stage
self.memory_buffers = [None] * self.pp_size
for cur_pp_rank in rank_list:
print(f'create pp model, torch allocated {torch.cuda.memory_allocated() / 1e9:.4f} GB, '
      f'reserved {torch.cuda.memory_reserved() / 1e9:.4f} GB')
# since the last initialized rank is the current pp rank, after init, the pp rank is still correct
mpu.set_pipeline_model_parallel_rank(cur_pp_rank)
if cur_pp_rank != self.pp_rank:
models = get_model(model_provider, wrap_with_ddp=False)
models = nn.ModuleList(models)
assert len(models) == self._model_chunk_size, f"{len(models)} != {self._model_chunk_size}"
self.pp_models[cur_pp_rank] = models
else:
# for the regular model on the current pp rank, wrap it with DDP
models = get_model(model_provider)
assert len(models) == self._model_chunk_size, f"{len(models)} != {self._model_chunk_size}"
self._this_rank_models = nn.ModuleList(models)
self.pp_models[cur_pp_rank] = nn.ModuleList(unwrap_model(models, (torchDDP, LocalDDP)))
self._build_param_buffer(cur_pp_rank)
self._build_param_references(cur_pp_rank, maintain_weight=cur_pp_rank == self.pp_rank)
# TODO: after binding to the memory buffer, we can load the checkpoint here
if cur_pp_rank != self.pp_rank:
for model in self.pp_models[cur_pp_rank]:
model.eval()
self._offload_params_to_cpu(cur_pp_rank)
def _build_param_buffer(self, pp_rank):
"""Build the parameter buffer in each pp rank"""
if pp_rank == self._pp_rank:
from verl.utils.memory_buffer import MemoryBuffer
# The code here is very hard-coded, based on the following assumptions:
# 1. `len(_this_rank_models) == 1`
# 2. `_this_rank_models[0]` is an instance of `DistributedDataParallel` and `use_distributed_optimizer=True`
# 3. Only bfloat16 data type is used in parameters
source = self._this_rank_models[0].buffers[0].param_data
self.memory_buffers[pp_rank] = {
torch.bfloat16: MemoryBuffer(source.numel(), source.numel(), torch.bfloat16, source)
}
else:
model = self.pp_models[pp_rank]
weight_buffer_meta = get_weight_buffer_meta_from_module(model)
self.memory_buffers[pp_rank] = build_memory_buffer(weight_buffer_meta)
def _build_param_references(self, pp_rank, maintain_weight=False):
if pp_rank == self._pp_rank:
return
model = self.pp_models[pp_rank]
build_memory_reference_from_module(model, self.memory_buffers[pp_rank], maintain_weight=maintain_weight)
def _load_params_to_cuda(self, pp_rank, to_empty=False):
assert pp_rank != self.pp_rank, f"unexpected to load current pp rank [{pp_rank}] back to cuda"
for buffer in self.memory_buffers[pp_rank].values():
if not to_empty:
buffer.data = buffer.data.to(torch.cuda.current_device(), non_blocking=True)
else:
buffer.data = torch.empty_like(buffer.data, device='cuda')
# rebuild reference after loading to CUDA
self._build_param_references(pp_rank)
def _offload_params_to_cpu(self, pp_rank, to_empty=False):
assert pp_rank != self.pp_rank, f"unexpected to offload current pp rank [{pp_rank}] to cpu"
for buffer in self.memory_buffers[pp_rank].values():
if not to_empty:
# offload the whole memory buffer to CPU
buffer.data = buffer.data.to('cpu', non_blocking=True)
else:
buffer.data = torch.empty_like(buffer.data, device='cpu')
self._build_param_references(pp_rank)
def load_params_to_cuda(self, to_empty=False):
"""load all model params to cuda"""
for cur_pp_rank in range(self.pp_size):
if cur_pp_rank != self.pp_rank:
self._load_params_to_cuda(cur_pp_rank, to_empty=to_empty)
def allgather_params(self):
"""allgather params of all pp ranks. Return a list of handles"""
for cur_pp_rank in range(self.pp_size):
global_src = dist.get_global_rank(group=self.pp_group, group_rank=cur_pp_rank)
# NOTE(sgm): the async op may cause memory leakage of the memory_buffer/pp_models
for _, param in sorted(self.pp_models[cur_pp_rank].named_parameters()):
dist.broadcast(tensor=param.data, src=global_src, group=self.pp_group, async_op=False)
def forward(self, *inputs, **kwargs):
try:
prev_output = None
for cur_chunk_rank in range(self._model_chunk_size):
if self._vpp_size:
mpu.set_virtual_pipeline_model_parallel_rank(cur_chunk_rank)
for cur_pp_rank in range(self.pp_size):
mpu.set_pipeline_model_parallel_rank(cur_pp_rank)
self.pp_models[cur_pp_rank][cur_chunk_rank].set_input_tensor(prev_output)
ret = self.pp_models[cur_pp_rank][cur_chunk_rank](*inputs, **kwargs)
self.pp_models[cur_pp_rank][cur_chunk_rank].set_input_tensor(None)
prev_output = ret
finally:
if self._vpp_size:
mpu.set_virtual_pipeline_model_parallel_rank(0)
mpu.set_pipeline_model_parallel_rank(self.pp_rank)
return ret
def __call__(self, *inputs, **kwargs):
return self.forward(*inputs, **kwargs)
def eval(self):
for model in self.pp_models[self.pp_rank]:
model.eval()
def train(self):
for model in self.pp_models[self.pp_rank]:
model.train()
def offload_params_to_cpu(self, to_empty=False):
"""offload params of models that are not of current pp rank to cpu"""
for cur_pp_rank in range(self.pp_size):
if cur_pp_rank != self.pp_rank:
self._offload_params_to_cpu(cur_pp_rank, to_empty=to_empty)
def get_all_params(self):
"""Get all the parameters of the models in all pp ranks
Returns:
params: List[List[Dict[str, Tensor]]]: a list of parameters in all pp, where each is a list of dict
tensors of each model chunk
"""
params = []
for pp_rank in range(self.pp_size):
params.append([])
for model_chunk_idx in range(len(self.pp_models[pp_rank])):
params[pp_rank].append({})
pp_model = self.pp_models[pp_rank][model_chunk_idx]
pp_model = unwrap_model(pp_model, (torchDDP, LocalDDP, Float16Module))  # also unwrap Float16Module
for name, param in pp_model.named_parameters():
# NOTE(gh) workaround: should not get lora params for inference
if 'lora' in name:
continue
params[pp_rank][model_chunk_idx][name] = param
return params
def update_this_rank_models(self, new_models):
self._this_rank_models = new_models
self._pp_models[self.pp_rank] = unwrap_model(new_models, (torchDDP, LocalDDP))
@property
def this_rank_models(self):
return self._this_rank_models
@property
def pp_size(self):
return self._pp_size
@property
def pp_rank(self):
return self._pp_rank
@property
def pp_group(self):
return self._pp_group
@property
def pp_models(self):
return self._pp_models
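# A minimal sketch of the flat-buffer binding AllGatherPPModel relies on, with
# plain torch standing in for MemoryBuffer and
# build_memory_reference_from_module: each parameter's .data is rebound to a
# view of one flat tensor, so offloading or freeing that single buffer moves
# every parameter of the pp stage at once. Illustrative only.
def _memory_buffer_sketch():
    import torch
    layer = torch.nn.Linear(4, 4, bias=False)
    flat = torch.empty(layer.weight.numel(), dtype=layer.weight.dtype)
    view = flat[:layer.weight.numel()].view_as(layer.weight)
    view.copy_(layer.weight.data)
    # rebind the parameter storage to the buffer view
    layer.weight.data = view
    # "offloading" now only needs to touch `flat`, not each parameter
    assert layer.weight.data.data_ptr() == flat.data_ptr()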
"""
Megatron Hybrid Engine:
- During training, only the current pp stage holds the parameters
- Before inference, broadcast the parameters of the current pp rank to all other pp ranks (all pp ranks holds all the parameters)
- Bind the parameters to the inference engine
- Do inference in tp. pp is treated as additional dp
- After inference, all the parameters that doesn't belong to this pp rank is freed.
"""
from .base import BaseShardingManager
import torch
from torch import nn
import torch.distributed
from torch.distributed import new_group
from verl import DataProto
from verl.utils.torch_functional import (broadcast_dict_tensor, allgather_dict_tensors)
import verl.utils.megatron.tensor_parallel as tp_utils
from verl.third_party.vllm import parallel_state as vllm_ps
from verl.third_party.vllm import LLM
from verl.utils.model import normalize_pp_vpp_params
# Micro data parallel group: an additional dp group that originates from splitting the training tp
# into infer_tp and micro_dp. By default, we use the order micro_dp - tp
_MICRO_DATA_PARALLEL_GROUP = None
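# A small illustrative helper (not used by the class below) showing how the
# micro-dp groups are laid out: the training tp of size train_tp is split into
# inference groups of size infer_tp, and the leftover factor
# micro_dp_size = train_tp // infer_tp becomes an extra data-parallel
# dimension, grouping consecutive ranks micro_dp-first.
def _micro_dp_rank_groups(world_size: int, train_tp: int, infer_tp: int):
    assert train_tp % infer_tp == 0
    micro_dp_size = train_tp // infer_tp
    num_micro_dp_groups = world_size // micro_dp_size
    # e.g. world_size=8, train_tp=4, infer_tp=2 -> [[0, 1], [2, 3], [4, 5], [6, 7]]
    return [list(range(i * micro_dp_size, (i + 1) * micro_dp_size)) for i in range(num_micro_dp_groups)]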
class MegatronVLLMShardingManager(BaseShardingManager):
def __init__(self, module: AllGatherPPModel, inference_engine: LLM, model_config, layer_name_mapping):
self.module = module
self.inference_engine = inference_engine
self.model_config = model_config
self.layer_name_mapping = layer_name_mapping
# initialize micro_dp group for vllm inference
global _MICRO_DATA_PARALLEL_GROUP
world_size = torch.distributed.get_world_size()
rank = torch.distributed.get_rank()
train_tensor_parallel_size = mpu.get_tensor_model_parallel_world_size()
infer_tensor_parallel_size = vllm_ps.get_tensor_model_parallel_world_size()
# TODO(sgm): this may not be true for FSDP -> vLLM
assert infer_tensor_parallel_size <= train_tensor_parallel_size, \
'Not implemented for infer_tp > train_tp'
assert train_tensor_parallel_size % infer_tensor_parallel_size == 0
micro_dp_size = train_tensor_parallel_size // infer_tensor_parallel_size
num_micro_dp_groups = world_size // micro_dp_size
assert _MICRO_DATA_PARALLEL_GROUP is None, ("micro data parallel group is already initialized")
for i in range(num_micro_dp_groups):
ranks = range(i * micro_dp_size, (i + 1) * micro_dp_size)
group = new_group(ranks=ranks)
if rank in ranks:
_MICRO_DATA_PARALLEL_GROUP = group
def default_tp_concat_fn(self, name, param, infer_params, model_config):
"""
name: name of the parameter
param: training parameters
infer_params (List[torch.Tensor]): a list of parameters all-gathered from micro_dp_group
model_config: huggingface model_config
TODO(zhangchi.usc1992): currently, the implementation is adhoc. We can move this function to the model
definition so that it is model-agnostic. If the model doesn't implement this function,
we can throw an error to force user disable TP HybridEngine.
"""
if self.layer_name_mapping.get("qkv_layer_name") in name:
# if the tensor is fused qkv: split each tp shard into q, k, v,
# then concatenate the q, k and v pieces separately
q_lst = []
k_lst = []
v_lst = []
assert model_config.num_attention_heads % model_config.num_key_value_heads == 0
num_q_per_kv = model_config.num_attention_heads // model_config.num_key_value_heads
assert infer_params[0].shape[0] % (num_q_per_kv + 2) == 0
kv_size_per_tp = infer_params[0].shape[0] // (num_q_per_kv + 2)
split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp]
for infer_param in infer_params:
q, k, v = infer_param.split(split_size)
q_lst.append(q)
k_lst.append(k)
v_lst.append(v)
q = torch.cat(q_lst, dim=0)
k = torch.cat(k_lst, dim=0)
v = torch.cat(v_lst, dim=0)
infer_params = torch.cat((q, k, v), dim=0)
elif self.layer_name_mapping.get("gate_proj_layer_name") in name:
# if the tensor is the fused gate/up projection
gate_lst = []
up_lst = []
for infer_param in infer_params:
gate, up = infer_param.chunk(2)
gate_lst.append(gate)
up_lst.append(up)
gate = torch.cat(gate_lst, dim=0)
up = torch.cat(up_lst, dim=0)
infer_params = torch.cat((gate, up), dim=0)
else:
# concat tensor
infer_params = torch.cat(infer_params, dim=tp_utils.get_tensor_parallel_partition_dim(param))
return infer_params
def _post_process_params(self, params):
"""
For each param, if it is a tp-splited param, we all-gather from micro_dp group.
"""
# here the params are in train tp format. we iterate over the params and all-gather
# TODO(zhangchi.usc1992) We can consider copying non-tp weights to another infer buffer.
# In this way, all the params in the original memory_buffers can be offloaded.
micro_dp_size = get_micro_data_parallel_world_size()
micro_dp_group = get_micro_data_parallel_group()
if micro_dp_size <= 1:
return
origin_params = {}
for name in params.keys():
param = params[name]
if tp_utils.is_tensor_parallel_param(param):
# allocate a new tensor with proper size
infer_params = [torch.empty_like(param) for _ in range(micro_dp_size)]
torch.distributed.all_gather(infer_params, param, group=micro_dp_group)
infer_params = self.default_tp_concat_fn(name, param, infer_params, self.model_config)
# swap in the gathered param and keep the original so it can be restored later
params[name] = infer_params
origin_params[name] = param
return origin_params
def __enter__(self):
# create a new cuda space for parameters not in this pp rank
self.module.load_params_to_cuda()
# broadcast the parameters from pp rank to other ranks
self.module.allgather_params()
# obtain name to parameters in pp/vpp
params = self.module.get_all_params()
# bind the params to inference engine
self.params = normalize_pp_vpp_params(params=params,
num_hidden_layers=self.model_config.num_hidden_layers,
layer_name='layers')
self.origin_params = self._post_process_params(self.params)
self.inference_engine.sync_model_weights(self.params, load_format='megatron')
def __exit__(self, exc_type, exc_value, traceback):
# offload the parameters that don't belong to this pp rank
self.module.offload_params_to_cpu()
# FIXME(sgm): the best practice would be to delete the cuda tensor and
# rebind the model weights; any cpu tensor works here
if get_micro_data_parallel_world_size() > 1:
for name in self.params.keys():
self.params[name] = self.origin_params[name]
# self.inference_engine.sync_model_weights(params)
self.inference_engine.offload_model_weights()
self.module.train()
# add empty cache after each compute
torch.cuda.empty_cache()
def preprocess_data(self, data: DataProto) -> DataProto:
# prompts are identical within each training tp group. We select the slice for each inference tp (micro-dp) rank
micro_dp_size = get_micro_data_parallel_world_size()
micro_dp_rank = get_micro_data_parallel_rank()
# broadcast from tp=0 to other tp ranks
broadcast_dict_tensor(data.batch,
src=mpu.get_tensor_model_parallel_src_rank(),
group=mpu.get_tensor_model_parallel_group())
if micro_dp_size > 1:
local_prompts = data.chunk(chunks=micro_dp_size)
data = local_prompts[micro_dp_rank]
return data
def postprocess_data(self, data: DataProto) -> DataProto:
meta_info = data.meta_info
# all gather batch among micro-dp groups
micro_dp_size = get_micro_data_parallel_world_size()
if micro_dp_size > 1:
data.batch = allgather_dict_tensors(data.batch.contiguous(),
size=get_micro_data_parallel_world_size(),
group=get_micro_data_parallel_group(),
dim=0)
# all gather batch among pp group
if meta_info.get('allgather_pp_output', True):
data.batch = allgather_dict_tensors(data.batch.contiguous(),
size=mpu.get_pipeline_model_parallel_world_size(),
group=mpu.get_pipeline_model_parallel_group(),
dim=0)
return data
"""
Micro Data parallel group
"""
def get_micro_data_parallel_group():
assert _MICRO_DATA_PARALLEL_GROUP is not None
return _MICRO_DATA_PARALLEL_GROUP
def get_micro_data_parallel_world_size():
return torch.distributed.get_world_size(group=get_micro_data_parallel_group())
def get_micro_data_parallel_rank():
return torch.distributed.get_rank(group=get_micro_data_parallel_group())
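# A minimal single-process sketch of the qkv branch of default_tp_concat_fn,
# with random tensors standing in for parameters gathered from the micro-dp
# group. Sizes are illustrative: 4 query heads over 2 kv heads gives
# num_q_per_kv = 2, so each fused qkv shard stacks [q, k, v] blocks of
# kv_size_per_tp * [num_q_per_kv, 1, 1] rows.
if __name__ == "__main__":
    import torch
    num_q_per_kv = 2  # num_attention_heads // num_key_value_heads
    kv_size_per_tp = 3
    split_size = [kv_size_per_tp * num_q_per_kv, kv_size_per_tp, kv_size_per_tp]
    infer_params = [torch.randn(sum(split_size), 8) for _ in range(2)]  # two tp shards
    q_lst, k_lst, v_lst = [], [], []
    for infer_param in infer_params:
        q, k, v = infer_param.split(split_size)
        q_lst.append(q)
        k_lst.append(k)
        v_lst.append(v)
    merged = torch.cat((torch.cat(q_lst), torch.cat(k_lst), torch.cat(v_lst)), dim=0)
    assert merged.shape[0] == sum(p.shape[0] for p in infer_params)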