Repository: shufangxun/LLaVA-MoD Branch: main Commit: 481ca57856c8 Files: 181 Total size: 9.9 MB Directory structure: gitextract_kubh4hae/ ├── LICENSE ├── README.md ├── docs/ │ ├── INFERENCE.md │ └── TRAIN_EVAL.md ├── llavamod/ │ ├── __init__.py │ ├── config/ │ │ ├── __init__.py │ │ ├── args.py │ │ └── dpconfig/ │ │ ├── dpo_zero2.json │ │ ├── dpo_zero2_offload.json │ │ ├── zero2.json │ │ ├── zero2_offload.json │ │ ├── zero3.json │ │ └── zero3_offload.json │ ├── constants.py │ ├── conversation.py │ ├── data/ │ │ ├── __init__.py │ │ ├── data_utils.py │ │ └── dataset.py │ ├── eval/ │ │ ├── __init__.py │ │ ├── eval_gpt_mmhal.py │ │ ├── eval_gpt_objhal.py │ │ ├── eval_gpt_review.py │ │ ├── eval_gpt_review_bench.py │ │ ├── eval_gpt_review_visual.py │ │ ├── eval_gqa.py │ │ ├── eval_gqa_1.py │ │ ├── eval_pope.py │ │ ├── eval_science_qa.py │ │ ├── eval_science_qa_gpt4.py │ │ ├── eval_science_qa_gpt4_requery.py │ │ ├── eval_textvqa.py │ │ ├── generate_webpage_data_from_table.py │ │ ├── gpt4_grpc.py │ │ ├── m4c_evaluator.py │ │ ├── model_qa.py │ │ ├── model_vqa.py │ │ ├── model_vqa_loader.py │ │ ├── model_vqa_mmbench.py │ │ ├── model_vqa_mmhal.py │ │ ├── model_vqa_objhal.py │ │ ├── model_vqa_qbench.py │ │ ├── model_vqa_science.py │ │ ├── qa_baseline_gpt35.py │ │ ├── run_llava.py │ │ ├── summarize_gpt_review.py │ │ ├── table/ │ │ │ ├── answer/ │ │ │ │ ├── answer_alpaca-13b.jsonl │ │ │ │ ├── answer_bard.jsonl │ │ │ │ ├── answer_gpt35.jsonl │ │ │ │ ├── answer_llama-13b.jsonl │ │ │ │ └── answer_vicuna-13b.jsonl │ │ │ ├── caps_boxes_coco2014_val_80.jsonl │ │ │ ├── model.jsonl │ │ │ ├── prompt.jsonl │ │ │ ├── question.jsonl │ │ │ ├── results/ │ │ │ │ ├── test_sqa_llava_13b_v0.json │ │ │ │ └── test_sqa_llava_lcs_558k_sqa_12e_vicuna_v1_3_13b.json │ │ │ ├── review/ │ │ │ │ ├── review_alpaca-13b_vicuna-13b.jsonl │ │ │ │ ├── review_bard_vicuna-13b.jsonl │ │ │ │ ├── review_gpt35_vicuna-13b.jsonl │ │ │ │ └── review_llama-13b_vicuna-13b.jsonl │ │ │ ├── reviewer.jsonl │ │ │ └── rule.json │ │ └── webpage/ │ │ ├── index.html │ │ ├── script.js │ │ └── styles.css │ ├── mm_utils.py │ ├── model/ │ │ ├── __init__.py │ │ ├── apply_delta.py │ │ ├── builder.py │ │ ├── cache_utils.py │ │ ├── consolidate.py │ │ ├── import_utils.py │ │ ├── language_model/ │ │ │ ├── gemma2/ │ │ │ │ ├── configuration_gemma2.py │ │ │ │ ├── modeling_gemma2.py │ │ │ │ ├── tokenization_gemma2.py │ │ │ │ └── tokenization_gemma2_fast.py │ │ │ ├── llama/ │ │ │ │ ├── configuration_llama.py │ │ │ │ └── modeling_llama.py │ │ │ ├── llava_gemma2.py │ │ │ ├── llava_gemma2_moe.py │ │ │ ├── llava_llama.py │ │ │ ├── llava_llama_moe.py │ │ │ ├── llava_minicpm.py │ │ │ ├── llava_minicpm_moe.py │ │ │ ├── llava_mistral.py │ │ │ ├── llava_mistral_moe.py │ │ │ ├── llava_mpt.py │ │ │ ├── llava_phi.py │ │ │ ├── llava_phi_moe.py │ │ │ ├── llava_qwen.py │ │ │ ├── llava_qwen1_5.py │ │ │ ├── llava_qwen1_5_moe.py │ │ │ ├── llava_qwen2.py │ │ │ ├── llava_qwen2_moe.py │ │ │ ├── llava_qwen_moe.py │ │ │ ├── llava_stablelm.py │ │ │ ├── llava_stablelm_moe.py │ │ │ ├── minicpm/ │ │ │ │ ├── configuration_minicpm.py │ │ │ │ └── modeling_minicpm.py │ │ │ ├── mpt/ │ │ │ │ ├── adapt_tokenizer.py │ │ │ │ ├── attention.py │ │ │ │ ├── blocks.py │ │ │ │ ├── configuration_mpt.py │ │ │ │ ├── custom_embedding.py │ │ │ │ ├── flash_attn_triton.py │ │ │ │ ├── hf_prefixlm_converter.py │ │ │ │ ├── meta_init_context.py │ │ │ │ ├── modeling_mpt.py │ │ │ │ ├── norm.py │ │ │ │ └── param_init_fns.py │ │ │ ├── phi/ │ │ │ │ ├── configuration_phi.py │ │ │ │ └── modeling_phi.py │ │ │ ├── qwen/ │ │ │ │ ├── 
configuration_qwen.py │ │ │ │ ├── cpp_kernels.py │ │ │ │ ├── modeling_qwen.py │ │ │ │ ├── qwen_generation_utils.py │ │ │ │ └── tokenization_qwen.py │ │ │ ├── qwen1_5/ │ │ │ │ ├── configuration_qwen2.py │ │ │ │ └── modeling_qwen2.py │ │ │ ├── qwen2/ │ │ │ │ ├── configuration_qwen2.py │ │ │ │ └── modeling_qwen2.py │ │ │ └── stablelm/ │ │ │ ├── configuration_stablelm_epoch.py │ │ │ ├── modeling_stablelm_epoch.py │ │ │ └── tokenization_arcade100k.py │ │ ├── llava_arch.py │ │ ├── make_delta.py │ │ ├── modeling_flash_attention_utils.py │ │ ├── multimodal_encoder/ │ │ │ ├── builder.py │ │ │ ├── clip_encoder.py │ │ │ ├── clips2_encoder.py │ │ │ └── siglip_encoder.py │ │ ├── multimodal_projector/ │ │ │ ├── builder.py │ │ │ ├── pool_block.py │ │ │ ├── qformer.py │ │ │ └── simple_block.py │ │ └── utils.py │ ├── serve/ │ │ ├── __init__.py │ │ ├── cli.py │ │ └── utils.py │ ├── train/ │ │ ├── __init__.py │ │ ├── align_train.py │ │ ├── align_trainer.py │ │ ├── dpo_train.py │ │ ├── dpo_trainer.py │ │ ├── llava_trainer.py │ │ ├── train.py │ │ └── train_utils.py │ └── utils.py ├── requirements.txt ├── scripts/ │ ├── activated_params.py │ ├── convert_gqa_for_eval.py │ ├── convert_mmbench_for_submission.py │ ├── convert_mmvet_for_eval.py │ ├── convert_seed_for_submission.py │ ├── convert_sqa_to_llava.py │ ├── convert_sqa_to_llava_base_prompt.py │ ├── convert_vizwiz_for_submission.py │ ├── convert_vqav2_for_submission.py │ ├── extract_mm_projector.py │ ├── kill.py │ ├── merge_lora_weights.py │ ├── merge_moe_lora_weights.py │ ├── sqa_eval_batch.sh │ └── sqa_eval_gather.sh └── shells/ ├── eval/ │ ├── gqa.sh │ ├── mmbench.sh │ ├── mmbench_cn.sh │ ├── mme.sh │ ├── mmhal.sh │ ├── objhal.sh │ ├── pope.sh │ ├── sqa.sh │ ├── textvqa.sh │ └── vizwiz.sh ├── inference/ │ └── cli_inference.sh └── train/ └── qwen/ ├── dense2dense_distillation.sh ├── dense2sparse_distillation.sh ├── finetune.sh ├── finetune_moe.sh ├── preference_distillation.sh └── pretrain.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ #
LLaVA-MoD: Making LLaVA Tiny via MoE Knowledge Distillation
[![License](https://img.shields.io/badge/License-Apache%202.0-yellow)](https://github.com/shufangxun/LLaVA-MoD/blob/main/LICENSE)
---

## 📢 News

- Jan 23, 2025: 🔥 LLaVA-MoD is accepted to ICLR 2025.
- Dec 24, 2024: 🏋️‍♀️ Training and evaluation code is released.
- Aug 28, 2024: 🤗 LLaVA-MoD is featured on [Huggingface Daily Papers](https://huggingface.co/papers/2408.15881).
- Aug 28, 2024: 📖 The paper is available on [arXiv](https://arxiv.org/pdf/2408.15881).

🌟 **Star us if you think it's helpful.** Your support means a lot! ⭐️

---

## ✨ Contents

- [🧭 Overview](#-overview)
- [🛠️ Installation](#-installation)
- [🗂️ Data Construction](#-data-construction)
- [🏋️‍♂️ Training and Evaluation](#-training-and-evaluation)
- [🚀 Inference](#-inference)
- [📖 Citation](#-citation)
- [🏆 Acknowledgement](#-acknowledgement)
- [📄 License](#-license)

---

## 🧭 Overview

**TL;DR:** LLaVA-MoD is an efficient framework for training small-scale Multimodal Language Models by distilling knowledge from larger models.

🚀 CLICK for the full abstract

We introduce **LLaVA-MoD**, a novel framework designed to enable the efficient training of small-scale Multimodal Language Models by distilling knowledge from a large-scale MLLM. Our approach addresses two fundamental challenges in MLLM distillation:

- **Network Optimization**: We enhance the s-MLLM structure by integrating a sparse Mixture of Experts (MoE) architecture, balancing computational efficiency and model expressiveness.
- **Progressive Knowledge Transfer**: We propose a two-stage transfer strategy:
  1. **Mimic Distillation**: Minimizing the Kullback-Leibler (KL) divergence between output distributions to help the student model emulate the teacher's understanding.
  2. **Preference Distillation**: Using Direct Preference Optimization (DPO), where the student model learns to outperform the teacher, especially on hallucination benchmarks.

Extensive experiments show that **LLaVA-MoD** outperforms existing models across multimodal benchmarks while activating only a minimal number of parameters and keeping computational costs low. With **only 2B activated parameters**, **LLaVA-MoD** surpasses **Qwen-VL-Chat-7B** by an average of **8.8%**, using merely **0.3% of the training data** and **23% of the trainable parameters**. These results highlight **LLaVA-MoD**'s success in distilling comprehensive knowledge from its teacher model, making it a groundbreaking solution for developing more efficient MLLMs.
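To make the mimic-distillation objective concrete, the sketch below computes a token-level KL loss between teacher and student output distributions, averaged over response tokens only. It is a minimal illustration under our own assumptions (the function name, `temperature` argument, and mask handling are not taken from this repository); the actual training logic lives under `llavamod/train`.

```python
import torch.nn.functional as F

def mimic_distillation_loss(student_logits, teacher_logits, response_mask, temperature=1.0):
    """Illustrative KL-based mimic-distillation loss (not the repo's exact code).

    student_logits / teacher_logits: [batch, seq_len, vocab] from the s-MLLM and l-MLLM.
    response_mask: [batch, seq_len], 1 on response tokens (cf. DISTILL_ALL_TOKENS=False).
    """
    # The student matches the teacher's soft distribution in log-space.
    student_logp = F.log_softmax(student_logits / temperature, dim=-1)
    teacher_prob = F.softmax(teacher_logits / temperature, dim=-1)

    # Per-token KL(teacher || student), summed over the vocabulary.
    kl = F.kl_div(student_logp, teacher_prob, reduction="none").sum(-1)

    # Average only over supervised (response) positions.
    mask = response_mask.float()
    return (kl * mask).sum() / mask.sum().clamp(min=1.0)
```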
---

## 🛠️ Installation

1. First install `anaconda`, then install `torch`. We recommend `torch==2.1.2` built for CUDA 11.8.

```bash
# CUDA 11.8
pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu118
```

2. Then install the remaining packages from `requirements.txt`:

```bash
pip install -r requirements.txt
```

---

## 🗂️ Data Construction

### 📚 Mimic Distillation

We follow LLaVA to construct the data in the following format:

```json
{
  "id": "000000052846",
  "image": "COCO2017/train/000000052846.jpg",
  "conversations": [
    {
      "from": "human",
      "value": "Where is the cat positioned in the image?\n<image>"
    },
    {
      "from": "gpt",
      "value": "The cat is positioned on top of the back of the couch in the living room."
    },
    {
      "from": "human",
      "value": "What is the cat doing in the image?"
    },
    {
      "from": "gpt",
      "value": "The cat is coming out from some curtains onto the couch and is sitting or standing on top of it."
    }
  ]
}
```

### 🧠 Preference Distillation

We follow RLAIF-V to construct the data in the following format:

```json
{
  "id": "OmniLMM-12B_OmniLMM-12B_0",
  "image": "RLAIF-V-Dataset_000_OmniLMM-12B_OmniLMM-12B_0.jpg",
  "chosen": [
    {
      "from": "human",
      "value": "<image>\nWho is more likely to use these tools a leather crafter or a paper crafter?"
    },
    {
      "from": "gpt",
      "value": "A leather crafter is more likely to use these tools. The image shows various crafting tools, including scissors and a hole punch, which are commonly used in leatherworking projects. Leather is a material that requires cutting, shaping, and precise hole-punching techniques to create desired designs or patterns. In contrast, paper crafters typically use different types of tools, such as adhesives, decorative papers, or specialized cutting machines like the Silhouette Cameo, for their projects."
    }
  ],
  "rejected": [
    {
      "from": "human",
      "value": "<image>\nWho is more likely to use these tools a leather crafter or a paper crafter?"
    },
    {
      "from": "gpt",
      "value": "A leather crafter is more likely to use these tools as they consist of a hole punch, scissors, and a knife. These items are typically used in crafting projects involving fabric or leather materials for various designs and patterns. Paper crafters may also benefit from some of these tools, but their primary focus would be on paper-related projects, which might require different types of tools such as paper cutters or scrapbooking supplies."
    }
  ]
}
```
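Both formats share the same turn structure: alternating `human`/`gpt` messages, with `<image>` marking where the visual tokens go. As a quick sanity check on your own JSON files, a reader along these lines pairs each prompt with its response; the helper name and the assumption that the file holds a JSON list are ours, not code from this repository.

```python
import json

def iter_mimic_pairs(path):
    """Illustrative reader for the LLaVA-style mimic-distillation JSON shown above."""
    with open(path) as f:
        samples = json.load(f)  # assumed: a list of {"id", "image", "conversations"} records
    for sample in samples:
        turns = sample["conversations"]
        # Pair each human turn with the gpt turn that follows it.
        for human, gpt in zip(turns[0::2], turns[1::2]):
            assert human["from"] == "human" and gpt["from"] == "gpt"
            yield sample["image"], human["value"], gpt["value"]

# Example usage (hypothetical path):
# for image, prompt, response in iter_mimic_pairs("mimic_train.json"):
#     print(image, prompt[:40], "->", response[:40])
```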
---

## 🏋️‍♂️ Training and Evaluation

The full details for training and evaluation can be found in [TRAIN_EVAL.md](docs/TRAIN_EVAL.md).

---

## 🚀 Inference

For instructions on inference, please refer to [INFERENCE.md](docs/INFERENCE.md).

---

## 📖 Citation

If you find our project useful for your research and applications, please star it and cite the paper using this BibTeX:

```BibTeX
@article{shu2024llavamod,
  title={LLaVA-MoD: Making LLaVA Tiny via MoE Knowledge Distillation},
  author={Shu, Fangxun and Liao, Yue and Zhuo, Le and Xu, Chenning and Zhang, Lei and Zhang, Guanghao and Shi, Haonan and Chen, Long and Zhong, Tao and He, Wanggui and Fu, Siming and others},
  journal={arXiv preprint arXiv:2408.15881},
  year={2024}
}
```

---

## 🏆 Acknowledgement

Our project is built upon [MoE-LLaVA](https://github.com/PKU-YuanGroup/MoE-LLaVA) and [LLaVA](https://github.com/haotian-liu/LLaVA). We are deeply grateful for the excellent codebases they provide. Additionally, we express our appreciation to [MobileVLM](https://github.com/Meituan-AutoML/MobileVLM) and [RLAIF-V](https://github.com/RLHF-V/RLAIF-V) for their meticulously processed datasets. Their contributions have been of immeasurable value in shaping our work.

---

## 📄 License

Our project is released under the Apache 2.0 license.

================================================
FILE: docs/INFERENCE.md
================================================

## Inference

We provide command-line inference and batch inference scripts.

### CLI Inference

```Shell
deepspeed --include localhost:0 --master_port 20019 llavamod/serve/cli.py \
    --model-path ${MODEL_PATH} --image-file ${IMAGE_FILE}
```

### Batch Inference

```Shell
deepspeed --master_port 20014 llavamod/eval/model_vqa.py \
    --model-path ${MODEL_PATH} \
    --question-file ${QUESTION_FILE} \
    --image-folder ${IMAGE_FOLDER} \
    --answers-file ${ANSWER_FILE} \
    --temperature 0.0 \
    --conv-mode qwen
```

================================================
FILE: docs/TRAIN_EVAL.md
================================================

## Preliminary

### Download Pretrained Checkpoints

We use [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) as the vision encoder for both the teacher and student models. Additionally, we use [Qwen-1.5](https://huggingface.co/collections/Qwen/qwen15-65c0a2f577b1ecb76d786524) / [Qwen-2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f) models of different sizes as the LLMs for the teacher and student models. These pretrained checkpoints can be downloaded from HuggingFace.

### Prepare Teacher Model

We follow the approach of [LLaVA-1.5](https://github.com/haotian-liu/LLaVA) to train the teacher model, replacing Vicuna-1.5-7B with Qwen-2-7B while keeping the training dataset and strategy unchanged.

## Training

The training of LLaVA-MoD comprises three stages:

- Adaptor Initialization: 0.6 million general captioning samples are employed to bridge the gap between the visual and language modalities.
- Mimic Distillation:
  - Dense-to-Dense Distillation: 2.4 million general captioning and conversation samples are utilized to distill general knowledge.
  - Dense-to-Sparse Distillation: 1.4 million multi-task samples, including VQA, documents, science, and OCR, are used to distill specialized knowledge.
- Preference Distillation: 80,000 preference data samples are utilized to distill preference knowledge.

### Adaptor Initialization

- first, download the caption dataset [LLaVA-Pretrain](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain)
- then, run the following script:

```shell
bash shells/train/qwen/pretrain.sh
```

### Mimic Distillation

In this stage, we initially conduct Dense-to-Dense Distillation on the dense student model. Subsequently, we up-cycle the student model from dense to sparse and conduct Dense-to-Sparse Distillation.

#### Dense-to-Dense Distillation

- first, download general caption datasets ([ShareGPT4V-Captioner](https://huggingface.co/datasets/Lin-Chen/ShareGPT4V/blob/main/share-captioner_coco_lcs_sam_1246k_1107.json) and [ALLaVA-Caption-LAION-4V](https://huggingface.co/datasets/FreedomIntelligence/ALLaVA-4V/blob/main/allava_laion/ALLaVA-Caption-LAION-4V.json)) and general conversation datasets ([SViT](https://github.com/BAAI-DCAI/Visual-Instruction-Tuning), [LVIS](https://github.com/X2FD/LVIS-INSTRUCT4V), [LRV](https://github.com/FuxiaoLiu/LRV-Instruction), [MIMIC-IT](https://github.com/Luodian/Otter)).
The general datasets have also been packaged and can be downloaded from [MoE-LLaVA](https://huggingface.co/datasets/LanguageBind/MoE-LLaVA). - then, set the distillation and model configuration: ```python # KD config POLICY_MODEL_TYPE='dense' REF_MODEL_TYPE='dense' LOSS_TYPE='only_kd' # kd_lm | only_kd DISTILL_ALL_TOKENS=False # False: only response, True: multimodal instruction + response # MoE config MOE_LOSS_ENABLE=False MOE_ENABLE=False MOE_FINETUNE=False MOE_MODE="sparse" NUM_EXPERTS=4 TOP_K_EXPERTS=2 USE_RESIDUAL=False ROUTER_AUX_LOSS_COEF=0.01 CAPACITY_FACTOR=1.5 ``` - finally, run the following scripts: ```shell bash shells/train/qwen/dense2dense_distillation.sh ``` #### Dense-to-Sparse Distillation - first, download multi-task datasets ([Text-VQA](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip), [IConQA](https://drive.google.com/file/d/1Xqdt1zMcMZU5N_u1SAIjk-UAclriynGx/edit), [SQA](https://drive.google.com/drive/folders/1w8imCXWYn2LxajmGeGH_g5DaL2rabHev), [SBU](https://huggingface.co/datasets/sbu_captions), follow [ShareGPT4V](https://github.com/InternLM/InternLM-XComposer/blob/main/projects/ShareGPT4V/docs/Data.md) to download images from: [LAION-CC-SBU-558K](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain/blob/main/images.zip), [COCO](http://images.cocodataset.org/zips/train2017.zip), [WebData](https://drive.google.com/drive/folders/1tCUQ-sq6vdshZVkF0ZeF3K4eztkXJgax?usp=sharing), [SAM](https://drive.google.com/file/d/1dKumdOKSXtV7lIXdrG7jsIK_z2vZv2gs/view?usp=drive_link), [GQA](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip), [OCR-VQA](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), [TextVQA](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip), [VisualGnome](https://cs.stanford.edu/people/rak248/VG_100K_2) ([Part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [Part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip)), follow [InternVL](https://huggingface.co/datasets/OpenGVLab/InternVL-Chat-V1-2-SFT-Data) to download [DVQA](https://github.com/kushalkafle/DVQA_dataset), [ChartQA](https://github.com/vis-nlp/ChartQA), [AI2D](https://allenai.org/data/diagrams), [DocVQA](https://www.docvqa.org/datasets), [GeoQA+](https://github.com/SCNU203/GeoQA-Plus), and [SynthDoG-EN](https://huggingface.co/datasets/naver-clova-ix/synthdog-en)). The json files have also been packaged and can be downloaded from [MobileVLM](https://huggingface.co/datasets/mtgv/MobileVLM_V2_FT_Mix2M) and [InternVL](https://huggingface.co/datasets/OpenGVLab/InternVL-Chat-V1-2-SFT-Data). - then, set the distillation and model configuration: ```python # KD config POLICY_MODEL_TYPE='dense' REF_MODEL_TYPE='dense' LOSS_TYPE='only_kd' # kd_lm | only_kd DISTILL_ALL_TOKENS=False # False: only response, True: multimodal instruction + response # MoE config MOE_LOSS_ENABLE=False MOE_ENABLE=False MOE_FINETUNE=False MOE_MODE="sparse" NUM_EXPERTS=4 TOP_K_EXPERTS=2 USE_RESIDUAL=False ROUTER_AUX_LOSS_COEF=0.01 CAPACITY_FACTOR=1.5 ``` - finally, run the following scripts: ```shell bash shells/train/qwen/dense2sparse_distillation.sh ``` ### Preference Distillation - first, download preference dataset from [RLAIF-V](https://huggingface.co/datasets/openbmb/RLAIF-V-Dataset). 
- then, set the distillation and model configuration: ```python # KD config POLICY_MODEL_TYPE='sparse' REF_MODEL_TYPE='dense' LOSS_TYPE='kto_pair' # kto_pair | sigmoid DISTILL_ALL_TOKENS=False # False: only response, True: multimodal instruction + response # MoE config MOE_LOSS_ENABLE=True MOE_ENABLE=True MOE_FINETUNE=True MOE_MODE="sparse" NUM_EXPERTS=4 TOP_K_EXPERTS=2 USE_RESIDUAL=False ROUTER_AUX_LOSS_COEF=0.01 CAPACITY_FACTOR=1.5 ``` - finally, run the following scripts: ```shell bash shells/train/qwen/preference_distillation.sh ``` ## Evaluation We follow [LLaVA-1.5](https://github.com/haotian-liu/LLaVA) to evaluate on comprehension benchmarks (TextVQA, GQA, ScienceQA, VizWiz, MME, MMBench) and [RLAIF-V](https://github.com/RLHF-V/RLAIF-V) to evaluate on hallucination benchmarks (MMHal Bench, POPE and Object HalBench). Please refer to these resources to organize the evaluation datasets. All the evaluation scripts are located under `shells/eval`. Here is an example for MMBench. ```shell #!/bin/bash MODEL_NAME='your_model_name' MODEL_PATH='your_model_path' CONV="qwen" SPLIT="mmbench_dev_en_20231003" EVAL="benchmark" deepspeed --include localhost:0 --master_port 20029 llavamod/eval/model_vqa_mmbench.py \ --model-path ${MODEL_PATH} \ --question-file ${EVAL}/mmbench/$SPLIT.tsv \ --answers-file ${EVAL}/mmbench/answers/$SPLIT/${MODEL_NAME}.jsonl \ --single-pred-prompt \ --temperature 0 \ --conv-mode ${CONV} mkdir -p ${EVAL}/mmbench/answers_upload/$SPLIT python3 scripts/convert_mmbench_for_submission.py \ --annotation-file ${EVAL}/mmbench/$SPLIT.tsv \ --result-dir ${EVAL}/mmbench/answers/$SPLIT \ --upload-dir ${EVAL}/mmbench/answers_upload/$SPLIT \ --experiment ${MODEL_NAME} ``` ================================================ FILE: llavamod/__init__.py ================================================ from .model import LlavaLlamaForCausalLM from .model import LLaVAMoDLlamaForCausalLM from .model import LlavaQWenForCausalLM from .model import LLaVAMoDLlamaForCausalLM import transformers a, b, c = transformers.__version__.split('.')[:3] if a == '4' and int(b) >= 34: from .model import LlavaMistralForCausalLM from .model import LLaVAMoDMistralForCausalLM if a == '4' and int(b) >= 36: from .model import LlavaMiniCPMForCausalLM from .model import LLaVAMoDMiniCPMForCausalLM from .model import LlavaPhiForCausalLM from .model import LLaVAMoDPhiForCausalLM from .model import LlavaStablelmForCausalLM from .model import LLaVAMoDStablelmForCausalLM if a == '4' and int(b) >= 37: from .model import LlavaQwen1_5ForCausalLM from .model import LLaVAMoDQwen1_5ForCausalLM ================================================ FILE: llavamod/config/__init__.py ================================================ ================================================ FILE: llavamod/config/args.py ================================================ from typing import Optional, List from dataclasses import field from llavamod.data.dataset import * from llavamod.train.train_utils import * @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="facebook/opt-125m") version: Optional[str] = field(default="v0") freeze_backbone: bool = field(default=False) tune_llm_ffn_only: bool = field(default=False) tune_mm_mlp_adapter: bool = field(default=False) mm_vision_select_layer: Optional[int] = field(default=-1) # default to the last layer pretrain_mm_mlp_adapter: Optional[str] = field(default=None) mm_use_im_start_end: bool = field(default=False) mm_use_im_patch_token: bool = field(default=True) 
mm_vision_select_feature: Optional[str] = field(default="patch") s2: bool = field(default=False) s2_scales: Optional[str] = field(default="336,672") # =================================================================== image_tower: Optional[str] = field(default=None) video_tower: Optional[str] = field(default=None) image_projector_type: Optional[str] = field(default='linear') video_projector_type: Optional[str] = field(default='linear') video_global_proj: bool = field(default=False) video_temproal_proj: bool = field(default=False) video_spatial_proj: bool = field(default=False) # =================================================================== # ============================================================= only_lora_ffn: bool = True moe_enable: bool = False train_modules: Optional[List[str]] = field(default=None, metadata={"help": ""}) moe_mode: str = field( default="second_half", metadata={ "help": "The backend to be used for half precision.", "choices": ["first_half", "second_half", "sparse", "dense"], }, ) moe_layers_idx: Optional[List[int]] = field(default=None, metadata={"help": "where to place moe layers."}) ep_size: int = 1 num_experts: Optional[List[int]] = field(default=4, metadata={"help": "number of experts for each moe layer."}) top_k_experts: int = field( default=2, metadata={ "help": "Top-k experts to deal with tokens.", "choices": [1, 2, 3, 4], }, ) capacity_factor: float = 1. eval_capacity_factor: float = 2. min_capacity: int = 0 use_residual: bool = False router_aux_loss_coef: float = 0.01 # ============================================================= @dataclass class DataArguments: lazy_preprocess: bool = False is_multimodal: bool = False image_aspect_ratio: str = 'square' # =================================================================== data_path: Optional[List[str]] = field(default=None, metadata={"help": "Path to the training data."}) image_folder: Optional[str] = field(default=None) video_folder: Optional[str] = field(default=None) num_frames: int = 8 # =================================================================== @dataclass class TrainingArguments(transformers.TrainingArguments): cache_dir: Optional[str] = field(default=None) optim: str = field(default="adamw_torch") remove_unused_columns: bool = field(default=False) freeze_mm_mlp_adapter: bool = field(default=False) mpt_attn_impl: Optional[str] = field(default="triton") model_max_length: int = field( default=512, metadata={ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." }, ) double_quant: bool = field( default=True, metadata={"help": "Compress the quantization statistics through double quantization."} ) quant_type: str = field( default="nf4", metadata={"help": "Quantization data type to use. 
Should be one of `fp4` or `nf4`."} ) bits: int = field( default=16, metadata={"help": "How many bits to use."} ) lora_enable: bool = False lora_r: int = 128 lora_alpha: int = 256 lora_dropout: float = 0.05 lora_weight_path: str = "" lora_bias: str = "none" mm_projector_lr: Optional[float] = None group_by_modality_length: bool = field(default=False) moe_finetune: bool = field(default=False) distill_all_tokens: bool = field(default=False) attn_implementation: str = field(default="flash_attention_2", metadata={"help": "Use transformers attention implementation."}) @dataclass class AlignArguments: policy_model_type: str = field(default='sparse') ref_model_type: str = field(default='dense') loss_type: str = field(default='only_kd') policy_model_name_or_path: str = field(default=None) policy_pretrain_mm_mlp_adapter: str = field(default=None) ref_model_name_or_path: str = field(default=None) ref_pretrain_mm_mlp_adapter: str = field(default=None) moe_loss_enable: bool = field(default=False) @dataclass class DPOArguments: policy_model_type: str = field(default='sparse') ref_model_type: str = field(default='dense') loss_type: str = field(default='sigmoid') policy_model_name_or_path: str = field(default=None) ref_model_name_or_path: str = field(default=None) moe_loss_enable: bool = field(default=False) ================================================ FILE: llavamod/config/dpconfig/dpo_zero2.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": "auto", "warmup_num_steps": "auto", "warmup_type": "linear", "total_num_steps": "auto" } }, "train_micro_batch_size_per_gpu": "auto", "train_batch_size": "auto", "gradient_accumulation_steps": "auto", "zero_optimization": { "stage": 2, "overlap_comm": true, "sub_group_size": 1e9, "allgather_partitions": true, "allgather_bucket_size": 2e8, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true } } ================================================ FILE: llavamod/config/dpconfig/dpo_zero2_offload.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 100, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1e-15 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupDecayLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": "auto", "warmup_num_steps": "auto", "warmup_type": "linear", "total_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ================================================ FILE: llavamod/config/dpconfig/zero2.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 
16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "train_micro_batch_size_per_gpu": "auto", "train_batch_size": "auto", "gradient_accumulation_steps": "auto", "zero_optimization": { "stage": 2, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto" } } ================================================ FILE: llavamod/config/dpconfig/zero2_offload.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 100, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "train_micro_batch_size_per_gpu": "auto", "train_batch_size": "auto", "gradient_accumulation_steps": "auto", "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu" }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto" } } ================================================ FILE: llavamod/config/dpconfig/zero3.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 100, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "train_micro_batch_size_per_gpu": "auto", "train_batch_size": "auto", "gradient_accumulation_steps": "auto", "zero_optimization": { "stage": 3, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true } } ================================================ FILE: llavamod/config/dpconfig/zero3_offload.json ================================================ { "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "steps_per_print": 1e5, "wall_clock_breakdown": false } ================================================ FILE: llavamod/constants.py ================================================ CONTROLLER_HEART_BEAT_EXPIRATION = 30 WORKER_HEART_BEAT_INTERVAL = 15 LOGDIR = "." IGNORE_INDEX = -100 IMAGE_TOKEN_INDEX = -200 DEFAULT_IMAGE_TOKEN = "" DEFAULT_IMAGE_PATCH_TOKEN = "" DEFAULT_IM_START_TOKEN = "" DEFAULT_IM_END_TOKEN = "" IMAGE_PLACEHOLDER = "" # ====================================================================================================== DEFAULT_VIDEO_TOKEN = "