Repository: RainBowLuoCS/DiffusionTrack Branch: main Commit: 9714f01c21ff Files: 99 Total size: 550.2 KB Directory structure: gitextract_zi_bwyml/ ├── .gitattributes ├── .gitignore ├── LICENSE ├── README.md ├── diffusion/ │ └── models/ │ ├── diffusion_head.py │ ├── diffusion_losses.py │ ├── diffusion_models.py │ └── diffusionnet.py ├── exps/ │ ├── default/ │ │ ├── nano.py │ │ ├── yolov3.py │ │ ├── yolox_l.py │ │ ├── yolox_m.py │ │ ├── yolox_s.py │ │ ├── yolox_tiny.py │ │ └── yolox_x.py │ └── example/ │ └── mot/ │ ├── yolox_x_diffusion_det_dancetrack.py │ ├── yolox_x_diffusion_det_mot17.py │ ├── yolox_x_diffusion_det_mot17_ablation.py │ ├── yolox_x_diffusion_det_mot20.py │ ├── yolox_x_diffusion_track_dancetrack.py │ ├── yolox_x_diffusion_track_dancetrack_baseline.py │ ├── yolox_x_diffusion_track_mot17.py │ ├── yolox_x_diffusion_track_mot17_ablation.py │ ├── yolox_x_diffusion_track_mot17_baseline.py │ ├── yolox_x_diffusion_track_mot20.py │ └── yolox_x_diffusion_track_mot20_baseline.py ├── requirements.txt ├── setup.py ├── tools/ │ ├── convert_bdd100k_to_coco.py │ ├── convert_cityperson_to_coco.py │ ├── convert_crowdhuman_to_coco.py │ ├── convert_dancetrack_to_coco.py │ ├── convert_ethz_to_coco.py │ ├── convert_kitti_to_coco.py │ ├── convert_mot17_to_coco.py │ ├── convert_mot20_to_coco.py │ ├── convert_video.py │ ├── mix_data_ablation.py │ ├── mix_data_bdd100k.py │ ├── mix_data_test_mot17.py │ ├── mix_data_test_mot20.py │ ├── mota.py │ ├── track.py │ ├── train.py │ └── txt2video.py └── yolox/ ├── __init__.py ├── core/ │ ├── __init__.py │ ├── launch.py │ └── trainer.py ├── data/ │ ├── __init__.py │ ├── data_augment.py │ ├── data_prefetcher.py │ ├── dataloading.py │ └── samplers.py ├── evaluators/ │ ├── __init__.py │ ├── coco_evaluator.py │ ├── diffusion_mot_evaluator.py │ ├── diffusion_mot_evaluator_kl.py │ └── evaluation.py ├── exp/ │ ├── __init__.py │ ├── base_exp.py │ ├── build.py │ └── yolox_base.py ├── layers/ │ ├── __init__.py │ ├── csrc/ │ │ ├── cocoeval/ │ │ │ ├── cocoeval.cpp │ │ │ └── cocoeval.h │ │ └── vision.cpp │ └── fast_coco_eval_api.py ├── models/ │ ├── __init__.py │ ├── darknet.py │ ├── losses.py │ ├── network_blocks.py │ ├── yolo_fpn.py │ ├── yolo_head.py │ ├── yolo_pafpn.py │ └── yolox.py ├── tracker/ │ ├── basetrack.py │ ├── diffusion_tracker.py │ ├── diffusion_tracker_kl.py │ ├── kalman_filter.py │ └── matching.py ├── tracking_utils/ │ ├── evaluation.py │ ├── io.py │ └── timer.py └── utils/ ├── __init__.py ├── allreduce_norm.py ├── box_ops.py ├── boxes.py ├── checkpoint.py ├── cluster_nms.py ├── demo_utils.py ├── dist.py ├── ema.py ├── logger.py ├── lr_scheduler.py ├── metric.py ├── model_utils.py ├── setup_env.py └── visualize.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitattributes ================================================ README.assets/MOT20.gif filter=lfs diff=lfs merge=lfs -text README.assets/dancetrack.gif filter=lfs diff=lfs merge=lfs -text ================================================ FILE: .gitignore ================================================ # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so datasets/* # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # output docs/api .code-workspace.code-workspace *.pkl *.npy *.pth *.onnx *.engine events.out.tfevents* pretrained *_outputs/ DiffusionTrack_*/ datasets/ *.pth.tar *.tar.gz src/* test.py id_rsa_cs module_test.py vis_fold ================================================ FILE: LICENSE ================================================ Attribution-NonCommercial 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More_considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution-NonCommercial 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. d. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. e. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. f. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. g. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. h. Licensor means the individual(s) or entity(ies) granting rights under this Public License. i. NonCommercial means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. j. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. k. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. l. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and b. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” The text of the Creative Commons public licenses is dedicated to the public domain under the CC0 Public Domain Dedication. Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. ================================================ FILE: README.md ================================================ ## DiffusionTrack:Diffusion Model For Multi-Object Tracking **DiffusionTrack is the first work of diffusion model for multi-object tracking.** ![image-20230819130751450](README.assets/image-20230819130751450.png) [**DiffusionTrack:Diffusion Model For Multi-Object Tracking**](https://arxiv.org/abs/2308.09905) Run Luo, Zikai Song, Lintao Ma, Jinlin Wei *[arXiv 2308.09905](https://arxiv.org/abs/2308.09905)* ## Tracking performance ### Results on MOT17 challenge test set with 15.89 FPS | Method | MOTA | IDF1 | HOTA | AssA | DetA | | ------------------ | -------- | -------- | -------- | -------- | -------- | | TrackFormer | 74.1 | 68.0 | 57.3 | 54.1 | 60.9 | | MeMOT | 72.5 | 69.0 | 56.9 | 55.2 | / | | MOTR | 71.9 | 68.4 | 57.2 | 55.8 | / | | CenterTrack | 67.8 | 64.7 | 52.2 | 51.0 | 53.8 | | PermaTrack | 73.8 | 68.9 | 55.5 | 53.1 | 58.5 | | TransCenter | 73.2 | 62.2 | 54.5 | 49.7 | 60.1 | | GTR | 75.3 | 71.5 | 59.1 | 57.0 | 61.6 | | TubeTK | 63.0 | 58.6 | / | / | / | | **DiffusionTrack** | **77.9** | **73.8** | **60.8** | **58.8** | **63.2** | ### Results on MOT20 challenge test set with 13.37 FPS | Method | MOTA | IDF1 | HOTA | AssA | DetA | | ------------------ | -------- | -------- | -------- | -------- | -------- | | TrackFormer | 68.6 | 65.7 | 54.7 | 53.0 | 56.7 | | MeMOT | 63.7 | 66.1 | 54.1 | **55.0** | / | | TransCenter | 67.7 | 58.7 | / | / | / | | **DiffusionTrack** | **72.8** | **66.3** | **55.3** | 51.3 | **59.9** | ### Results on Dancetrack challenge test set with 21.05 FPS | Method | MOTA | IDF1 | HOTA | AssA | DetA | | ------------------ | -------- | -------- | -------- | -------- | -------- | | TransTrack | 88.4 | 45.2 | 45.5 | 27.5 | 75.9 | | CenterTrack | 86.8 | 35.7 | 41.8 | 22.6 | 78.1 | | **DiffusionTrack** | **89.3** | **47.5** | **52.4** | **33.5** | **82.2** | ### Visualization results ![MOT20](README.assets/MOT20.gif) ![dancetrack](README.assets/dancetrack.gif) ### Robustness to detection perturbation ![image-20230819134931428](README.assets/image-20230819134931428.png) ## Installation Step1. Install requirements for DiffusionTrack. ``` git clone https://github.com/RainBowLuoCS/DiffusionTrack.git cd DiffusionTrack pip3 install -r requirements.txt python3 setup.py develop ``` Step2. Install [pycocotools](https://github.com/cocodataset/cocoapi). ``` pip3 install cython; pip3 install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' ``` Step3. Others ``` pip3 install cython_bbox ``` Step4. Install detectron2 ``` git clone https://github.com/facebookresearch/detectron2.git python -m pip install -e detectron2 ``` ## Data preparation Download [MOT17](https://motchallenge.net/), [MOT20](https://motchallenge.net/), [CrowdHuman](https://www.crowdhuman.org/), [Cityperson](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md), [ETHZ](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md) ,[Dancetrack](https://github.com/DanceTrack/DanceTrack) put them under /datasets in the following structure: ``` datasets |——————mot | └——————train | └——————test └——————crowdhuman | └——————Crowdhuman_train | └——————Crowdhuman_val | └——————annotation_train.odgt | └——————annotation_val.odgt └——————MOT20 | └——————train | └——————test └——————dancetrack | └——————train | └——————test └——————Cityscapes | └——————images | └——————labels_with_ids └——————ETHZ └——————eth01 └——————... └——————eth07 ``` Then, you need to turn the datasets to COCO format and mix different training data: ``` cd python3 tools/convert_mot17_to_coco.py python3 tools/convert_dancetrack_to_coco.py python3 tools/convert_mot20_to_coco.py python3 tools/convert_crowdhuman_to_coco.py python3 tools/convert_cityperson_to_coco.py python3 tools/convert_ethz_to_coco.py ``` Before mixing different datasets, you need to follow the operations in [mix_xxx.py](https://github.com/ifzhang/ByteTrack/blob/c116dfc746f9ebe07d419caa8acba9b3acfa79a6/tools/mix_data_ablation.py#L6) to create a data folder and link. Finally, you can mix the training data: ``` cd python3 tools/mix_data_ablation.py python3 tools/mix_data_test_mot17.py python3 tools/mix_data_test_mot20.py ``` ## Model zoo You can download our model weight from [our model zoo](https://drive.google.com/drive/folders/1xfBo04Ncm504xFUMtC4_0g0Bf61yPsXh?usp=sharing). We provide a 32-bit precision model, you can load it and then use half-precision fine-tuning to get a 16-bit precision model weight, so that you will get the above inference speed. ## Training The pretrained YOLOX model can be downloaded from their [model zoo](https://github.com/ifzhang/ByteTrack). After downloading the pretrained models, you can put them under /pretrained. - **Train ablation model (MOT17 half train and CrowdHuman)** ``` cd python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py -d 8 -b 16 -o -c pretrained/bytetrack_ablation.pth.tar python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -d 8 -b 16 -o -c pretrained/diffusiontrack_ablation_det.pth.tar ``` - **Train MOT17 test model (MOT17 train, CrowdHuman, Cityperson and ETHZ)** ``` cd python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot17_det.pth.tar ``` - **Train MOT20 test model (MOT20 train, CrowdHuman)** ``` cd python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot20.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot20.pth.tar python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot20_det.pth.tar ``` **Train Dancetrack test model (Dancetrack)** ``` cd python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_dancetrack.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -d 8 -b 16 -o -c pretrained/diffusiontrack_dancetrack_det.pth.tar ``` ## Tracking - **Evaluation on MOT17 half val** ``` cd python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -c pretrained/diffusiontrack_ablation_track.pth.tar -b 1 -d 1 --fuse ``` - **Test on MOT17** ``` cd python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -c pretrained/diffusiontrack_mot17_track.pth.tar -b 1 -d 1 --fuse ``` - **Test on MOT20** ``` cd python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -c pretrained/diffusiontrack_mot20_track.pth.tar -b 1 -d 1 --fuse ``` - **Test on Dancetrack** ``` cd python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -c pretrained/diffusiontrack_dancetrack_track.pth.tar -b 1 -d 1 --fuse ``` ## News - (2024.02) [DiffMOT](https://github.com/Kroery/DiffMOT.git) is accepted by CVPR2024, demonstrating the potential of the diffusion-based tracker and once again validating our visionary insights, congratulations! - (2023.12) Our paper is accepted by AAAI2024! - (2023.08) Code is released! - (2023.06) Despite being rejected by NIPS2023, we firmly believe the diffusion model is a novel solution for multi-object tracking problems. - (2022.11) Write the first line of the code for this great idea! ## License This project is under the CC-BY-NC 4.0 license. See [LICENSE](https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE) for details. ## Citation If you use DiffusionTrack in your research or wish to refer to the baseline results published here, please use the following BibTeX entry. ``` @article{luo2023diffusiontrack, title={DiffusionTrack: Diffusion Model For Multi-Object Tracking}, author={Luo, Run and Song, Zikai and Ma, Lintao and Wei, Jinlin and Yang, Wei and Yang, Min}, journal={arXiv preprint arXiv:2308.09905}, year={2023} } ``` ## Acknowledgement A large part of the code is borrowed from [ByteTrack](https://github.com/ifzhang/ByteTrack) and [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet) thanks for their wonderful works. ================================================ FILE: diffusion/models/diffusion_head.py ================================================ import math import random from collections import namedtuple import torch import torch.nn.functional as F from torch import nn from torchvision.ops import nms,box_iou from .diffusion_losses import SetCriterionDynamicK, HungarianMatcherDynamicK from .diffusion_models import DynamicHead from yolox.utils.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh from yolox.utils import synchronize from detectron2.layers import batched_nms import time ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start']) def exists(x): return x is not None def default(val, d): if exists(val): return val return d() if callable(d) else d def extract(a, t, x_shape): """extract the appropriate t index for a batch of indices""" batch_size = t.shape[0] out = a.gather(-1, t) return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))) def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps, dtype=torch.float64) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0, 0.999) class DiffusionHead(nn.Module): """ Implement DiffusionHead """ def __init__(self, num_classes, width=1.0, strides=[8, 16, 32], num_proposals=500, num_heads=6,): super().__init__() self.device="cpu" self.dtype=torch.float32 self.width=width self.num_classes = num_classes self.num_proposals = num_proposals # self.num_proposals = 512 self.hidden_dim = int(256*width) self.num_heads = num_heads # build diffusion timesteps = 1000 sampling_timesteps = 1 self.objective = 'pred_x0' betas = cosine_beta_schedule(timesteps) alphas = 1. - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.) timesteps, = betas.shape self.num_timesteps = int(timesteps) # tracking setting self.inference_time_range=1 self.track_candidate=1 self.candidate_num_strategy=max self.sampling_timesteps = default(sampling_timesteps, timesteps) assert self.sampling_timesteps <= timesteps self.is_ddim_sampling = self.sampling_timesteps < timesteps self.ddim_sampling_eta = 1. self.self_condition = False self.scale = 2.0 self.box_renewal = True self.use_ensemble = True self.register_buffer('betas', betas) self.register_buffer('alphas_cumprod', alphas_cumprod) self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod)) self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod)) self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod)) self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod)) self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1)) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', posterior_variance) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20))) self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)) self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod)) # Build Dynamic Head. class_weight = 2.0 giou_weight = 2.0 l1_weight = 5.0 no_object_weight =0.1 self.deep_supervision = True self.use_focal = True self.use_fed_loss = False self.use_nms = False self.pooler_resolution=7 self.noise_strategy="xywh" self.head = DynamicHead(num_classes,self.hidden_dim,self.pooler_resolution,strides,[self.hidden_dim]*len(strides),return_intermediate=self.deep_supervision,num_heads=self.num_heads,use_focal=self.use_focal,use_fed_loss=self.use_fed_loss) # Loss parameters: # Build Criterion. matcher = HungarianMatcherDynamicK( cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss ) weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight} if self.deep_supervision: aux_weight_dict = {} for i in range(self.num_heads - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) losses = ["labels", "boxes"] self.criterion = SetCriterionDynamicK( num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss) def predict_noise_from_start(self, x_t, t, x0): return ( (extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) ) def model_predictions(self, backbone_feats,images_whwh,x,t,lost_features=None,fix_bboxes=False,x_self_cond=None,clip_x_start=False): def prepare(x,images_whwh): x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale) x_boxes = ((x_boxes / self.scale) + 1) / 2 x_boxes = box_cxcywh_to_xyxy(x_boxes) x_boxes = x_boxes * images_whwh[:, None, :] return x_boxes def post(x_start,images_whwh): x_start = x_start / images_whwh[:, None, :] x_start = box_xyxy_to_cxcywh(x_start) x_start = (x_start * 2 - 1.) * self.scale x_start = torch.clamp(x_start, min=-1 * self.scale, max=self.scale) return x_start bs=len(x)//2 bboxes=prepare(x,images_whwh=images_whwh) start_time=time.time() outputs_class, outputs_coord,outputs_score = self.head(backbone_feats,torch.split(bboxes,bs,dim=0),t,lost_features,fix_bboxes) end_time=time.time() x_start = outputs_coord[-1] # (batch, num_proposals, 4) predict boxes: absolute coordinates (x1, y1, x2, y2) x_start=post(x_start,images_whwh=images_whwh) pred_noise = self.predict_noise_from_start(x,t,x_start) return ModelPrediction(pred_noise, x_start), outputs_class,outputs_coord,outputs_score,end_time-start_time @torch.no_grad() def new_ddim_sample(self,backbone_feats,images_whwh,ref_targets=None,dynamic_time=True,num_timesteps=1,num_proposals=500,inference_time_range=1,track_candidate=1,diffusion_t=200,clip_denoised=True): batch = images_whwh.shape[0]//2 self.sampling_timesteps,self.num_proposals,self.track_candidate,self.inference_time_range=num_timesteps,num_proposals,track_candidate,inference_time_range shape = (batch, self.num_proposals, 4) cur_bboxes= torch.randn(shape,device=self.device,dtype=self.dtype) ref_t_list=[] track_t_list=[] total_time=0 if ref_targets is None or self.track_candidate==0: ref_bboxes=torch.randn(shape, device=self.device) for i in range(batch): t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long() if dynamic_time: ref_t,track_t=t[0],t[1] else: ref_t,track_t=t[0],t[0] ref_t_list.append(ref_t) track_t_list.append(track_t) else: labels =ref_targets[..., :5] nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects shape = (batch, self.num_proposals, 4) diffused_boxes = [] cur_diffused_boxes=[] for batch_idx,num_gt in enumerate(nlabel): gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt]) image_size_xyxy = images_whwh[batch_idx] gt_boxes = gt_bboxes_per_image / image_size_xyxy # cxcywh gt_boxes = box_xyxy_to_cxcywh(gt_boxes) # t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long() # if dynamic_time: # ref_t,track_t=t[0],t[1] # else: # ref_t,track_t=t[0],t[0] if batch_idx==0: ref_t=diffusion_t track_t=diffusion_t else: ref_t=diffusion_t track_t=diffusion_t self.track_candidate=4 d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,ref_t) diffused_boxes.append(d_boxes) ref_t_list.append(ref_t) d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,track_t,ref_label) cur_diffused_boxes.append(d_boxes) track_t_list.append(track_t) ref_bboxes=torch.stack(diffused_boxes) cur_bboxes=torch.stack(cur_diffused_boxes) sampling_timesteps, eta= self.sampling_timesteps, self.ddim_sampling_eta def get_time_pairs(t,sampling_timesteps): # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps times = torch.linspace(-1, t - 1, steps=sampling_timesteps + 1) times = list(reversed(times.int().tolist())) time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)] return time_pairs ref_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in ref_t_list],device=self.device,dtype=torch.long) track_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in track_t_list],device=self.device,dtype=torch.long) # (batch,sampling_timesteps,2) bboxes=torch.cat([ref_bboxes,cur_bboxes],dim=0) x_start = None # for (ref_time, ref_time_next),(cur_time, cur_time_next) in zip(ref_time_pairs,cur_time_pairs): for sampling_timestep in range(sampling_timesteps): is_last=sampling_timestep==(sampling_timesteps-1) ref_time_cond = ref_t_time_pairs_list[:,sampling_timestep,0] cur_time_cond = track_t_time_pairs_list[:,sampling_timestep,0] time_cond=torch.cat([ref_time_cond,cur_time_cond],dim=0) self_cond = x_start if self.self_condition else None preds, outputs_class, outputs_coord,outputs_score,association_time = self.model_predictions(backbone_feats,images_whwh,bboxes,time_cond,fix_bboxes=False, x_self_cond=self_cond, clip_x_start=clip_denoised) total_time+=association_time pred_noise, x_start = preds.pred_noise, preds.pred_x_start if is_last: bboxes = x_start continue if self.box_renewal: # filter remain_list=[] pre_remain_bboxes=[] pre_remain_x_start=[] pre_remain_pred_noise=[] cur_remain_bboxes=[] cur_remain_x_start=[] cur_remain_pred_noise=[] for i in range(batch): # if i==0: # remain_list.append(len(pred_noise[i,:,:])) # pre_remain_pred_noise.append(pred_noise[i,:,:]) # cur_remain_pred_noise.append(pred_noise[i+batch,:,:]) # pre_remain_x_start.append(x_start[i,:,:]) # cur_remain_x_start.append(x_start[i+batch,:,:]) # pre_remain_bboxes.append(bboxes[i,:,:]) # cur_remain_bboxes.append(bboxes[i+batch,:,:]) # else: threshold = 0.2 score_per_image = outputs_score[-1][i] # pre_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i])) # cur_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i+batch])) # value=((pre_score+cur_score)/2).flatten() value, _ = torch.max(score_per_image, -1, keepdim=False) keep_idx = value >=threshold num_remain = torch.sum(keep_idx) remain_list.append(num_remain) pre_remain_pred_noise.append(pred_noise[i,keep_idx,:]) cur_remain_pred_noise.append(pred_noise[i+batch,keep_idx,:]) pre_remain_x_start.append(x_start[i,keep_idx,:]) cur_remain_x_start.append(x_start[i+batch,keep_idx,:]) pre_remain_bboxes.append(bboxes[i,keep_idx,:]) cur_remain_bboxes.append(bboxes[i+batch,keep_idx,:]) x_start=pre_remain_x_start+cur_remain_x_start bboxes=pre_remain_bboxes+cur_remain_bboxes pred_noise=pre_remain_pred_noise+cur_remain_pred_noise def diffusion(sampling_times,bboxes,x_start,pred_noise): times,time_nexts=sampling_times[:,0],sampling_times[:,1] alpha = torch.tensor([self.alphas_cumprod[time] for time in times],dtype=self.dtype,device=self.device) alpha_next = torch.tensor([self.alphas_cumprod[time_next] for time_next in time_nexts],dtype=self.dtype,device=self.device) sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt() c = (1 - alpha_next - sigma ** 2).sqrt() if self.box_renewal: for i in range(batch): noise = torch.randn_like(bboxes[i]) bboxes[i] = x_start[i] * alpha_next[i].sqrt() + \ c[i] * pred_noise[i] + \ sigma[i] * noise bboxes[i] = torch.cat((bboxes[i], torch.randn(self.num_proposals - remain_list[i], 4, device=self.device)), dim=0) else: noise = torch.randn_like(bboxes) bboxes = x_start * alpha_next.sqrt()[:,None,None] + \ c[:,None,None] * pred_noise + \ sigma[:,None,None] * noise return bboxes bboxes[:batch]=diffusion(ref_t_time_pairs_list[:,sampling_timestep],bboxes[:batch],x_start[:batch],pred_noise[:batch]) bboxes[batch:]=diffusion(track_t_time_pairs_list[:,sampling_timestep],bboxes[batch:],x_start[batch:],pred_noise[batch:]) if self.box_renewal: bboxes=torch.stack(bboxes) box_cls = outputs_class[-1] box_pred = outputs_coord[-1] conf_score=outputs_score[-1] return torch.cat([box_pred.view(2*batch,-1,4),box_cls.view(2*batch,-1,1)],dim=-1),conf_score.view(batch,-1,1),total_time # forward diffusion def q_sample(self, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise def forward(self,features,mate_info,targets=None): mate_shape,mate_device,mate_dtype=mate_info self.device=mate_device self.dtype=mate_dtype b,_,h,w=mate_shape images_whwh=torch.tensor([w, h, w, h], dtype=self.dtype, device=self.device)[None,:].expand(2*b,4) if not self.training: results = self.new_ddim_sample(features,images_whwh,targets,dynamic_time=False) return results if self.training: targets, x_boxes, noises, t = self.prepare_targets(targets,images_whwh) t=t.squeeze(-1) # t[b:]=t[:b] x_boxes = x_boxes * images_whwh[:,None,:] pre_x_boxes,cur_x_boxes=torch.split(x_boxes,b,dim=0) outputs_class,outputs_coord,outputs_score = self.head(features,(pre_x_boxes,cur_x_boxes),t) output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1],'pred_scores':outputs_score[-1]} if self.deep_supervision: output['aux_outputs'] = [{'pred_logits': a, 'pred_boxes': b,'pred_scores': c} for a, b, c in zip(outputs_class[:-1], outputs_coord[:-1],outputs_score[:-1])] loss_dict = self.criterion(output, targets) weight_dict = self.criterion.weight_dict for k in loss_dict.keys(): if k in weight_dict: loss_dict[k] *= weight_dict[k] return loss_dict def prepare_diffusion_repeat(self,gt_boxes,t,ref_repeat_tensor=None): """ :param gt_boxes: (cx, cy, w, h), normalized :param num_proposals: """ t = torch.full((1,),t,device=self.device).long() noise = torch.randn(self.num_proposals,4,device=self.device,dtype=self.dtype) num_gt = gt_boxes.shape[0] if not num_gt: # generate fake gt boxes if empty gt boxes gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device) num_gt = 1 num_repeat = self.num_proposals // num_gt # number of repeat except the last gt box in one image repeat_tensor = [num_repeat] * (num_gt - self.num_proposals % num_gt) + [num_repeat + 1] * ( self.num_proposals % num_gt) assert sum(repeat_tensor) == self.num_proposals random.shuffle(repeat_tensor) repeat_tensor = torch.tensor(repeat_tensor, device=self.device) if ref_repeat_tensor is not None: repeat_tensor=ref_repeat_tensor gt_boxes = (gt_boxes * 2. - 1.) * self.scale x_start = torch.repeat_interleave(gt_boxes, repeat_tensor, dim=0) if self.noise_strategy=="xy": noise[:,2:]=0 # noise sample x = self.q_sample(x_start=x_start, t=t, noise=noise) if self.training: x = torch.clamp(x, min=-1 * self.scale, max=self.scale) x = ((x / self.scale) + 1) / 2. diff_boxes = box_cxcywh_to_xyxy(x) else: diff_boxes=x return diff_boxes,noise,repeat_tensor def prepare_diffusion_concat(self,gt_boxes,t,ref_mask=None): """ :param gt_boxes: (cx, cy, w, h), normalized :param num_proposals: """ if self.training: self.track_candidate=1 t = torch.full((1,),t,device=self.device).long() noise = torch.randn(self.num_proposals, 4, device=self.device,dtype=self.dtype) select_mask=None num_gt = gt_boxes.shape[0]*self.track_candidate if not num_gt: # generate fake gt boxes if empty gt boxes gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device) num_gt = 1 else: gt_boxes=torch.repeat_interleave(gt_boxes,torch.tensor([self.track_candidate]*gt_boxes.shape[0],device=self.device),dim=0) if num_gt < self.num_proposals: box_placeholder = torch.randn(self.num_proposals - num_gt, 4, device=self.device,dtype=self.dtype) / 6. + 0.5 # 3sigma = 1/2 --> sigma: 1/6 # box_placeholder=torch.clip(torch.poisson(torch.clip(box_placeholder*5,min=0)),min=1,max=10)/10 # box_placeholder=torch.nn.init.uniform_(box_placeholder, a=0, b=1) # box_placeholder=torch.ones_like(box_placeholder) # box_placeholder[:,:2]=box_placeholder[:,:2]/2 box_placeholder[:, 2:4] = torch.clip(box_placeholder[:, 2:4], min=1e-4) x_start = torch.cat((gt_boxes, box_placeholder), dim=0) elif num_gt > self.num_proposals: select_mask = [True] * self.num_proposals + [False] * (num_gt - self.num_proposals) random.shuffle(select_mask) if ref_mask is not None: select_mask=ref_mask x_start = gt_boxes[select_mask] else: x_start = gt_boxes x_start = (x_start * 2. - 1.) * self.scale if self.noise_strategy=="xy": noise[:,2:]=0 # noise sample x = self.q_sample(x_start=x_start, t=t, noise=noise) if self.training: # x=x_start x = torch.clamp(x, min=-1 * self.scale, max=self.scale) x = ((x / self.scale) + 1) / 2. diff_boxes = box_cxcywh_to_xyxy(x) else: diff_boxes = x return diff_boxes, noise, select_mask def prepare_targets(self,targets,images_whwh): labels = targets[..., :5] nlabel = (labels.sum(dim=2) > 0).sum(dim=1) # number of objects new_targets = [] diffused_boxes = [] noises = [] ts = [] select_mask={} # select_t={} # select_gt_boxes={} for batch_idx,num_gt in enumerate(nlabel): target = {} gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt, 1:5]) gt_classes = labels[batch_idx, :num_gt, 0] image_size_xyxy = images_whwh[batch_idx] gt_boxes = gt_bboxes_per_image / image_size_xyxy # cxcywh gt_boxes = box_xyxy_to_cxcywh(gt_boxes) x_gt_boxes=gt_boxes d_t = torch.randint(0, self.num_timesteps, (1,), device=self.device).long()[0] ## baseline setting # if batch_idx 0 indices_batchi = (non_valid, torch.arange(0, 0).to(bz_out_prob_pre)) matched_qidx = torch.arange(0, 0).to(bz_out_prob_pre) indices.append(indices_batchi) matched_ids.append(matched_qidx) continue bz_gtboxs_pre = targets[batch_idx]['boxes'] # [num_gt, 4] normalized (cx, xy, w, h) bz_gtboxs_abs_xyxy_pre = targets[batch_idx]['boxes_xyxy'] bz_gtboxs_curr = targets[batch_idx+bs//2]['boxes'] # [num_gt, 4] normalized (cx, xy, w, h) bz_gtboxs_abs_xyxy_curr = targets[batch_idx+bs//2]['boxes_xyxy'] fg_mask_pre, is_in_boxes_and_center_pre = self.get_in_boxes_info( box_xyxy_to_cxcywh(bz_boxes_pre), # absolute (cx, cy, w, h) box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_pre), # absolute (cx, cy, w, h) expanded_strides=32 ) fg_mask_curr, is_in_boxes_and_center_curr = self.get_in_boxes_info( box_xyxy_to_cxcywh(bz_boxes_curr), # absolute (cx, cy, w, h) box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_curr), # absolute (cx, cy, w, h) expanded_strides=32 ) fg_mask=fg_mask_pre&fg_mask_curr is_in_boxes_and_center=is_in_boxes_and_center_pre&is_in_boxes_and_center_curr pair_wise_ious_pre = ops.box_iou(bz_boxes_pre, bz_gtboxs_abs_xyxy_pre) pair_wise_ious_curr = ops.box_iou(bz_boxes_curr, bz_gtboxs_abs_xyxy_curr) pair_wise_ious=(pair_wise_ious_pre+pair_wise_ious_curr)/2 cost_class=0 bz_out_prob_set=[bz_out_prob_pre,bz_out_prob_curr] bz_tgt_ids_set=[bz_tgt_ids_pre,bz_tgt_ids_curr] # Compute the classification cost. if self.use_focal: alpha = self.focal_loss_alpha gamma = self.focal_loss_gamma for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set): neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log()) cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids] elif self.use_fed_loss: # focal loss degenerates to naive one for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set): neg_cost_class = (-(1 - bz_out_prob + 1e-8).log()) pos_cost_class = (-(bz_out_prob + 1e-8).log()) cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids] else: for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set): cost_class += -bz_out_prob[:, bz_tgt_ids] # Compute the L1 cost between boxes # image_size_out = torch.cat([v["image_size_xyxy"].unsqueeze(0) for v in targets]) # image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries, 1).flatten(0, 1) # image_size_tgt = torch.cat([v["image_size_xyxy_tgt"] for v in targets]) bz_image_size_out_pre = targets[batch_idx]['image_size_xyxy'] bz_image_size_tgt_pre = targets[batch_idx]['image_size_xyxy_tgt'] bz_image_size_out_curr = targets[batch_idx+bs//2]['image_size_xyxy'] bz_image_size_tgt_curr = targets[batch_idx+bs//2]['image_size_xyxy_tgt'] bz_out_bbox_pre = bz_boxes_pre / bz_image_size_out_pre # normalize (x1, y1, x2, y2) bz_out_bbox_curr = bz_boxes_curr / bz_image_size_out_curr # normalize (x1, y1, x2, y2) bz_tgt_bbox_pre = bz_gtboxs_abs_xyxy_pre / bz_image_size_tgt_pre # normalize (x1, y1, x2, y2) bz_tgt_bbox_curr = bz_gtboxs_abs_xyxy_curr / bz_image_size_tgt_curr # normalize (x1, y1, x2, y2) cost_bbox_pre = torch.cdist(bz_out_bbox_pre, bz_tgt_bbox_pre, p=1) cost_bbox_curr = torch.cdist(bz_out_bbox_curr, bz_tgt_bbox_curr, p=1) cost_giou = -generalized_box_iou(bz_boxes_pre,bz_boxes_curr,bz_gtboxs_abs_xyxy_pre,bz_gtboxs_abs_xyxy_curr) # Final cost matrix cost = self.cost_bbox * (cost_bbox_pre+cost_bbox_curr)/2 + self.cost_class * cost_class/2 + self.cost_giou * cost_giou + 100.0 * (~is_in_boxes_and_center) assert not torch.any(torch.isnan(cost)),"Error nan value occurs" # cost = (cost_class + 3.0 * cost_giou + 100.0 * (~is_in_boxes_and_center)) # [num_query,num_gt] cost[~fg_mask] = cost[~fg_mask] + 10000.0 # if bz_gtboxs.shape[0]>0: indices_batchi, matched_qidx = self.dynamic_k_matching(cost, pair_wise_ious, bz_gtboxs_pre.shape[0]) indices.append(indices_batchi) matched_ids.append(matched_qidx) return indices, matched_ids def get_in_boxes_info(self, boxes, target_gts, expanded_strides): xy_target_gts = box_cxcywh_to_xyxy(target_gts) # (x1, y1, x2, y2) anchor_center_x = boxes[:, 0].unsqueeze(1) anchor_center_y = boxes[:, 1].unsqueeze(1) # whether the center of each anchor is inside a gt box b_l = anchor_center_x > xy_target_gts[:, 0].unsqueeze(0) b_r = anchor_center_x < xy_target_gts[:, 2].unsqueeze(0) b_t = anchor_center_y > xy_target_gts[:, 1].unsqueeze(0) b_b = anchor_center_y < xy_target_gts[:, 3].unsqueeze(0) # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] , is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4) is_in_boxes_all = is_in_boxes.sum(1) > 0 # [num_query] # in fixed center center_radius = 2.5 # Modified to self-adapted sampling --- the center size depends on the size of the gt boxes # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212 b_l = anchor_center_x > (target_gts[:, 0] - (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0) b_r = anchor_center_x < (target_gts[:, 0] + (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0) b_t = anchor_center_y > (target_gts[:, 1] - (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0) b_b = anchor_center_y < (target_gts[:, 1] + (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0) is_in_centers = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4) is_in_centers_all = is_in_centers.sum(1) > 0 is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all is_in_boxes_and_center = (is_in_boxes & is_in_centers) return is_in_boxes_anchor, is_in_boxes_and_center def dynamic_k_matching(self, cost, pair_wise_ious, num_gt): matching_matrix = torch.zeros_like(cost) # [300,num_gt] ious_in_boxes_matrix = pair_wise_ious n_candidate_k = self.ota_k # Take the sum of the predicted value and the top 10 iou of gt with the largest iou as dynamic_k topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=0) dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk(cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False) matching_matrix[:, gt_idx][pos_idx] = 1.0 del topk_ious, dynamic_ks, pos_idx anchor_matching_gt = matching_matrix.sum(1) if (anchor_matching_gt > 1).sum() > 0: _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1) matching_matrix[anchor_matching_gt > 1] *= 0 matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1 while (matching_matrix.sum(0) == 0).any(): num_zero_gt = (matching_matrix.sum(0) == 0).sum() matched_query_id = matching_matrix.sum(1) > 0 cost[matched_query_id] += 100000.0 unmatch_id = torch.nonzero(matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1) for gt_idx in unmatch_id: pos_idx = torch.argmin(cost[:, gt_idx]) matching_matrix[:, gt_idx][pos_idx] = 1.0 if (matching_matrix.sum(1) > 1).sum() > 0: # If a query matches more than one gt _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1) # find gt for these queries with minimal cost matching_matrix[anchor_matching_gt > 1] *= 0 # reset mapping relationship matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1 # keep gt with minimal cost assert not (matching_matrix.sum(0) == 0).any() selected_query = matching_matrix.sum(1) > 0 gt_indices = matching_matrix[selected_query].max(1)[1] assert selected_query.sum() == len(gt_indices) cost[matching_matrix == 0] = cost[matching_matrix == 0] + float('inf') matched_query_id = torch.min(cost, dim=0)[1] return (selected_query, gt_indices), matched_query_id ================================================ FILE: diffusion/models/diffusion_models.py ================================================ import copy import math import numpy as np import torch from torch import einsum, nn import torch.nn.functional as F from einops import rearrange, repeat from einops_exts import rearrange_many def exists(val): return val is not None from detectron2.modeling.poolers import ROIPooler from detectron2.structures import Boxes _DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16) class SinusoidalPositionEmbeddings(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, time): device = time.device half_dim = self.dim // 2 embeddings = math.log(10000) / (half_dim - 1) embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings) embeddings = time[:, None] * embeddings[None, :] embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1) return embeddings class GaussianFourierProjection(nn.Module): """Gaussian random features for encoding time steps.""" def __init__(self, embed_dim, scale=30.): super().__init__() # Randomly sample weights during initialization. These weights are fixed # during optimization and are not trainable. self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False) def forward(self, x): x_proj = x[:, None] * self.W[None, :] * 2 * np.pi return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) class Dense(nn.Module): """A fully connected layer that reshapes outputs to feature maps.""" def __init__(self, input_dim, output_dim): super().__init__() self.dense = nn.Linear(input_dim, output_dim) def forward(self, x): return self.dense(x) class DynamicHead(nn.Module): def __init__(self, num_classes, d_model, pooler_resolution, strides, in_channels, dim_feedforward = 2048, nhead = 8, dropout = 0.0, activation = "relu", num_heads = 6, return_intermediate=True, use_focal=False, use_fed_loss=False, prior_prob=0.01 ): super().__init__() # Build RoI. box_pooler = self._init_box_pooler(pooler_resolution,strides,in_channels) self.box_pooler = box_pooler # Build heads. rcnn_head = RCNNHead(d_model, num_classes,pooler_resolution, dim_feedforward, nhead, dropout, activation,use_focal=use_focal,use_fed_loss=use_fed_loss) self.head_series = _get_clones(rcnn_head, num_heads) self.num_heads = num_heads self.return_intermediate = return_intermediate # Gaussian random feature embedding layer for time self.d_model = d_model time_dim = d_model * 4 self.time_mlp = nn.Sequential( SinusoidalPositionEmbeddings(d_model), nn.Linear(d_model, time_dim), nn.GELU(), nn.Linear(time_dim, time_dim), ) # Init parameters. self.use_focal = use_focal self.use_fed_loss = use_fed_loss self.num_classes = num_classes if self.use_focal or self.use_fed_loss: self.bias_value = -math.log((1 - prior_prob) / prior_prob) self._reset_parameters() def _reset_parameters(self): # init all parameters. for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) # initialize the bias for focal loss and fed loss. if self.use_focal or self.use_fed_loss: if p.shape[-1] == self.num_classes or p.shape[-1] == self.num_classes + 1: nn.init.constant_(p, self.bias_value) @staticmethod def _init_box_pooler(pooler_resolution,strides,in_channels): pooler_scales = [1/s for s in strides] sampling_ratio = 2 pooler_type = "ROIAlignV2" # If StandardROIHeads is applied on multiple feature maps (as in FPN), # then we share the same predictors and therefore the channel counts must be the same # Check all channel counts are equal assert len(set(in_channels)) == 1, in_channels box_pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, ) return box_pooler def forward(self,features,init_bboxes,t,lost_features=None,fix_ref_boxes=False): # assert t shape (batch_size) time = self.time_mlp(t) inter_class_logits = [] inter_pred_bboxes = [] inter_association_scores=[] bboxes = init_bboxes proposal_features = None for head_idx, rcnn_head in enumerate(self.head_series): class_logits, pred_bboxes, proposal_features ,association_score_logits= rcnn_head(features, bboxes, proposal_features,self.box_pooler,time,lost_features,fix_ref_boxes) if self.return_intermediate: inter_class_logits.append(torch.cat(class_logits,dim=0)) inter_pred_bboxes.append(torch.cat(pred_bboxes,dim=0)) inter_association_scores.append(torch.sigmoid(association_score_logits)) bboxes = (pred_bbox.detach() for pred_bbox in pred_bboxes) if self.return_intermediate: return torch.stack(inter_class_logits), torch.stack(inter_pred_bboxes),torch.stack(inter_association_scores) return torch.cat(class_logits,dim=0)[None],torch.cat(pred_bboxes,dim=0)[None],torch.sigmoid(association_score_logits)[None] class RCNNHead(nn.Module): def __init__(self,d_model, num_classes, pooler_resolution,dim_feedforward=2048, nhead=8, dropout=0.1, activation="relu", scale_clamp: float = _DEFAULT_SCALE_CLAMP, bbox_weights=(2.0, 2.0, 1.0, 1.0),use_focal=False,use_fed_loss=False): super().__init__() self.d_model = d_model # dynamic. self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout,batch_first=True) # self.self_attn = FlashSelfAttention(d_model, nhead, attn_drop=dropout) # self.self_attn = WindowAttention(d_model,(8,8),nhead,attn_drop=dropout) # self.cross_attn = nn.MultiheadAttention(d_model,nhead,dropout=dropout) # self.stf=STF(dim=d_model) self.stf=SFT(d_model,pooler_resolution=pooler_resolution) self.linear1 = nn.Linear(d_model, dim_feedforward) self.dropout = nn.Dropout(dropout) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm1 = nn.LayerNorm(d_model) self.norm2 = nn.LayerNorm(d_model) self.norm3 = nn.LayerNorm(d_model) # self.norm4 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) # self.dropout4 = nn.Dropout(dropout) self.activation = _get_activation_fn(activation) # block time mlp self.block_time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(d_model * 4, d_model * 2)) # cls. num_cls = 1 cls_module = list() for _ in range(num_cls): cls_module.append(nn.Linear(d_model, d_model,False)) cls_module.append(nn.LayerNorm(d_model)) cls_module.append(nn.ReLU(inplace=True)) self.cls_module = nn.ModuleList(cls_module) # association score. num_score = 1 score_module = list() for _ in range(num_score): score_module.append(nn.Linear(2*d_model, d_model,False)) score_module.append(nn.LayerNorm(d_model)) score_module.append(nn.ReLU(inplace=True)) self.score_module = nn.ModuleList(score_module) # reg. num_reg = 3 reg_module = list() for _ in range(num_reg): reg_module.append(nn.Linear(d_model, d_model,True)) reg_module.append(nn.LayerNorm(d_model)) reg_module.append(nn.ReLU(inplace=True)) self.reg_module = nn.ModuleList(reg_module) # pred. self.use_focal = use_focal self.use_fed_loss = use_fed_loss if self.use_focal or self.use_fed_loss: self.class_logits = nn.Linear(d_model, num_classes) else: self.class_logits = nn.Linear(d_model, num_classes + 1) self.score_logits=nn.Linear(d_model,1) self.bboxes_delta = nn.Linear(d_model, 4) self.scale_clamp = scale_clamp self.bbox_weights = bbox_weights nn.init.constant_(self.class_logits.bias,-math.log((1 - 1e-2) / 1e-2)) nn.init.constant_(self.bboxes_delta.bias,-math.log((1 - 1e-2) / 1e-2)) for sub_module in self.reg_module: if isinstance(sub_module,nn.Linear): nn.init.constant_(sub_module.bias,-math.log((1 - 1e-2) / 1e-2)) def forward(self, features,bboxes,pro_features,pooler,time_emb,lost_features=None,fix_ref_boxes=False): """ :param bboxes: (N, nr_boxes, 4) :param pro_features: (N, nr_boxes, d_model) """ if pro_features is not None: # pro_features_pre,pro_features_curr=pro_features pro_features_x=pro_features else: pro_features_x=None bboxes_pre,bboxes_cur=bboxes N, nr_boxes = bboxes_pre.shape[:2] # rnd_idx = torch.randperm(nr_boxes) # bboxes_pre=bboxes_pre[:,rnd_idx,:] # bboxes_cur=bboxes_cur[:,rnd_idx,:] # roi_feature. proposal_boxes_pre = list() proposal_boxes_curr = list() for b in range(N): proposal_boxes_pre.append(Boxes(bboxes_pre[b])) proposal_boxes_curr.append(Boxes(bboxes_cur[b])) roi_features_pre = pooler(features[0], proposal_boxes_pre) if lost_features is not None: roi_features_pre[roi_features_pre.shape[0]-lost_features.shape[0]:]=lost_features roi_features_curr = pooler(features[1], proposal_boxes_curr) if pro_features_x is None: pro_features_pre = roi_features_pre.view(N, nr_boxes, self.d_model, -1).mean(-1) pro_features_curr=roi_features_curr.view(N, nr_boxes, self.d_model, -1).mean(-1) pro_features_x=torch.cat([pro_features_pre,pro_features_curr],dim=0) # else: # pro_features_pre=pro_features_pre.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:] # pro_features_curr=pro_features_curr.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:] roi_features_pre = roi_features_pre.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2) roi_features_curr = roi_features_curr.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2) roi_features_x=torch.cat([torch.cat([roi_features_pre,roi_features_curr],dim=-2).unsqueeze(2), torch.cat([roi_features_curr,roi_features_pre],dim=-2).unsqueeze(2)],dim=2) # self_att. pro_features_x = pro_features_x.view(2*N, nr_boxes, self.d_model) # pro_features_pre =pro_features_pre+ self.dropout1(self.self_attn(pro_features_pre, pro_features_pre, pro_features_pre,20,25)) pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x, pro_features_x, value=pro_features_x)[0]) # pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x)) pro_features_x = self.norm1(pro_features_x) # pro_features_curr = pro_features_curr.view(N, nr_boxes, self.d_model).permute(1, 0, 2) # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,value=pro_features_curr)[0]) # # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,pro_features_curr,20,25)) # pro_features_curr = self.norm1(pro_features_curr) # cross_interact # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model) # pro_features_pre =pro_features_pre+self.dropout2(self.cross_interact(pro_features_pre, roi_features_curr)) # pro_features_pre = self.norm2(pro_features_pre) # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model) # pro_features_curr =pro_features_curr+self.dropout2(self.cross_interact(pro_features_curr, roi_features_pre)) # pro_features_curr = self.norm2(pro_features_curr) pro_features_x=torch.cat([x.unsqueeze(2) for x in pro_features_x.split(N,dim=0)],dim=-2) pro_features_x=pro_features_x+self.dropout2(self.stf(roi_features_x,pro_features_x)) pro_features_x = self.norm2(pro_features_x) # roi_features_x=torch.cat([roi_features_curr.unsqueeze(2),roi_features_pre.unsqueeze(2)],dim=-2) # pro_features_x=pro_features_x+self.dropout4(self.stf2(roi_features_x,pro_features_x)) # pro_features_x = self.norm4(pro_features_x) pro_features_x=torch.cat([x.squeeze(2) for x in pro_features_x.split(1,dim=-2)],dim=0).reshape(2*N*nr_boxes,-1) # inst_interact. # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model) # pro_features_pre =pro_features_pre+self.dropout3(self.inst_interact(pro_features_pre, roi_features_pre)) # obj_features_pre = self.norm3(pro_features_pre) # # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model) # pro_features_curr =pro_features_curr+self.dropout3(self.inst_interact(pro_features_curr, roi_features_curr)) # obj_features_curr = self.norm3(pro_features_curr) # obj_feature. obj_features_tmp =self.linear2(self.dropout(self.activation(self.linear1(pro_features_x)))) obj_features=pro_features_x+self.dropout3(obj_features_tmp) obj_features= self.norm3(obj_features) # obj_features_curr_tmp =self.linear2(self.dropout(self.activation(self.linear1(obj_features_curr)))) # obj_features_curr=obj_features_curr+self.dropout4(obj_features_curr_tmp) # obj_features_curr = self.norm4(obj_features_curr) # fc_feature_pre = obj_features_pre.transpose(0, 1).reshape(N * nr_boxes, -1) # fc_feature_curr = obj_features_curr.transpose(0, 1).reshape(N * nr_boxes, -1) # all_features=[fc_feature_pre,fc_feature_curr] # all_features=[] # for fc_feature,fc_time_emb in zip([fc_feature_pre,fc_feature_curr],time_emb.split(N,dim=0)): scale_shift = self.block_time_mlp(time_emb) scale_shift = torch.repeat_interleave(scale_shift, nr_boxes, dim=0) scale, shift = scale_shift.chunk(2, dim=1) fc_feature = obj_features * (scale + 1) + shift # all_features.append(fc_feature) cls_feature= fc_feature.clone() reg_feature= fc_feature.clone() score_feature= torch.cat(fc_feature.clone().split(N*nr_boxes,dim=0),dim=-1) for cls_layer in self.cls_module: cls_feature= cls_layer(cls_feature) for score_layer in self.score_module: score_feature=score_layer(score_feature) for reg_layer in self.reg_module: reg_feature= reg_layer(reg_feature) class_logits = self.class_logits(cls_feature) bboxes_deltas= self.bboxes_delta(reg_feature) class_logits_pre,class_logits_curr=class_logits.split(N*nr_boxes,dim=0) bboxes_deltas_pre,bboxes_deltas_curr=bboxes_deltas.split(N*nr_boxes,dim=0) association_score=self.score_logits(score_feature) pred_bboxes_pre = self.apply_deltas(bboxes_deltas_pre, bboxes_pre.view(-1, 4)) if fix_ref_boxes: assert not self.training,"fix reference bboxes only for inference mode" pred_bboxes_pre[:nr_boxes]=bboxes_pre[0,:nr_boxes] pred_bboxes_curr = self.apply_deltas(bboxes_deltas_curr, bboxes_cur.view(-1, 4)) return (class_logits_pre.view(N, nr_boxes, -1),class_logits_curr.view(N, nr_boxes, -1)), (pred_bboxes_pre.view(N, nr_boxes, -1),pred_bboxes_curr.view(N, nr_boxes, -1)),obj_features,association_score.view(N, nr_boxes, -1) def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. deltas[i] represents k potentially different class-specific box transformations for the single box boxes[i]. boxes (Tensor): boxes to transform, of shape (N, 4) """ boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.bbox_weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(deltas) pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 return pred_boxes class SFT(nn.Module): def __init__(self, hidden_dim, pooler_resolution,dim_dynamic=2*64,num_dynamic=2): super().__init__() self.hidden_dim = hidden_dim self.dim_dynamic = dim_dynamic self.num_dynamic = num_dynamic self.pooler_resolution= pooler_resolution self.num_params = self.hidden_dim * self.dim_dynamic self.dynamic_layer = nn.Linear(self.hidden_dim, self.num_dynamic * self.num_params) self.norm1 = nn.LayerNorm(self.dim_dynamic) self.norm2 = nn.LayerNorm(self.hidden_dim) self.activation = nn.ReLU(inplace=True) num_output = 2*self.hidden_dim * self.pooler_resolution ** 2 self.num_output= 2*self.pooler_resolution ** 2 self.out_layer = nn.Linear(num_output, self.hidden_dim) self.norm3 = nn.LayerNorm(self.hidden_dim) def forward(self,roi_features,pro_features): ''' pro_features: ( N,nr_boxes,2,self.d_model) roi_features: ( N,nr_boxes,2,49*2,self.d_model) ''' N=pro_features.shape[0] # features=torch.cat([x.unsqueeze(2) for x in roi_features.split(self.num_output,dim=-2)],dim=2).reshape(-1,self.num_output,self.hidden_dim) features = roi_features.reshape(-1,self.num_output,self.hidden_dim) parameters = self.dynamic_layer(pro_features) param1 = parameters[:, :, :,:self.num_params].reshape(-1, self.hidden_dim, self.dim_dynamic) param2 = parameters[:, :, :,self.num_params:].reshape(-1, self.dim_dynamic, self.hidden_dim) features = torch.bmm(features, param1) features = self.norm1(features) features = self.activation(features) features = torch.bmm(features, param2) features = self.norm2(features) features = self.activation(features) features = features.flatten(1) features = self.out_layer(features) features = self.norm3(features) features = self.activation(features) return features.reshape(N,-1,2,self.hidden_dim) class PerceiverAttention(nn.Module): def __init__(self, *, dim, dim_head=64, heads=8): super().__init__() self.scale = dim_head**-0.5 self.heads = heads inner_dim = dim_head * heads self.norm_media = nn.LayerNorm(dim) self.norm_latents = nn.LayerNorm(dim) self.to_q = nn.Linear(dim, inner_dim, bias=False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) self.to_out = nn.Linear(inner_dim, dim, bias=False) def forward(self, x, latents): """ Args: x (torch.Tensor): image features shape (b, T, n1, D) latent (torch.Tensor): latent features shape (b, T, n2, D) """ x = self.norm_media(x) latents = self.norm_latents(latents) h = self.heads q = self.to_q(latents) kv_input = torch.cat((x, latents), dim=-2) k, v = self.to_kv(kv_input).chunk(2, dim=-1) q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h) q = q * self.scale # attention sim = einsum("... i d, ... j d -> ... i j", q, k) sim = sim - sim.amax(dim=-1, keepdim=True).detach() attn = sim.softmax(dim=-1) out = einsum("... i j, ... j d -> ... i d", attn, v) out = rearrange(out, "b h t n d -> b t n (h d)", h=h) return self.to_out(out) def FeedForward(dim, mult=4): inner_dim = int(dim * mult) return nn.Sequential( nn.LayerNorm(dim), nn.Linear(dim, inner_dim, bias=False), nn.GELU(), nn.Linear(inner_dim, dim, bias=False), ) # class STF(nn.Module): # def __init__( # self, # *, # dim, # depth=2, # dim_head=64, # heads=8, # ff_mult=4, # ): # super().__init__() # # self.latents = nn.Parameter(torch.randn(num_latents, dim)) # self.layers = nn.ModuleList([]) # for _ in range(depth): # self.layers.append( # nn.ModuleList( # [ # PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads), # FeedForward(dim=dim, mult=ff_mult), # ] # ) # ) # self.norm = nn.LayerNorm(dim) # def forward(self,roi_features,pro_features): # ''' # pro_features: ( N,nr_boxes,2,self.d_model) # roi_features: ( N,nr_boxes,2,49*2,self.d_model) # ''' # b,n,x,dim=pro_features.shape # # blocks # latents=pro_features.reshape(b,n*x,1,-1) # roi_features=roi_features.reshape(b,n*x,-1,dim) # for attn, ff in self.layers: # latents = attn(roi_features, latents) + latents # latents = ff(latents) + latents # return self.norm(latents).reshape(b,n,x,dim) class WindowAttention(nn.Module): """ Window based multi-head self attention (W-MSA) module with relative position bias. It supports both of shifted and non-shifted window. Args: dim (int): Number of input channels. window_size (tuple[int]): The height and width of the window. num_heads (int): Number of attention heads. qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 proj_drop (float, optional): Dropout ratio of output. Default: 0.0 """ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): super().__init__() self.dim = dim self.window_size = window_size # Wh, Ww self.num_heads = num_heads head_dim = dim // num_heads self.scale = qk_scale or head_dim ** -0.5 self.to_q = nn.Linear(dim, dim, bias=qkv_bias) self.to_k = nn.Linear(dim, dim, bias=qkv_bias) self.to_v = nn.Linear(dim, dim, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) # trunc_normal_(self.relative_position_bias_table, std=.02) self.softmax = nn.Softmax(dim=-1) def forward(self,q,k,v,H,W): """ Forward function. Args: x: input features with shape of (num_windows*B, N, C) mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None """ B_, N, C = q.shape assert N==k.shape[1] and N==v.shape[1],"query,key and value must have equal length" pad_l = pad_t = 0 pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1] pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0] Hp, Wp=0,0 def mode_charge(x): x = x.reshape(B_, H, W, C) x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) _, Hp, Wp, _ = x.shape x = window_partition(x, self.window_size[0]) # nW*B, window_size, window_size, C x = x.view(-1, self.window_size[1] * self.window_size[0], C) # nW*B, window_size*window_size, C return x,Hp,Wp (q,Hp,Wp),(k,_,_),(v,_,_)=mode_charge(q),mode_charge(k),mode_charge(v) B_w = q.shape[0] N_w = q.shape[1] q= self.to_q(q).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0] k= self.to_k(k).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0] v= self.to_v(v).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0] q = q * self.scale attn = (q @ k.transpose(-2, -1)) attn = self.softmax(attn) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B_w, N_w, C) x = self.proj(x) x = self.proj_drop(x) x = x.view(-1, self.window_size[1], self.window_size[0], C) x = window_reverse(x, self.window_size[0], Hp, Wp) # B H' W' C if pad_r > 0 or pad_b > 0: x = x[:, :H, :W, :].contiguous() x = x.view(B_, H * W, C) return x def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def _get_activation_fn(activation): """Return an activation function given a string""" if activation == "relu": return F.relu if activation == "gelu": return F.gelu if activation == "glu": return F.glu raise RuntimeError(F"activation should be relu/gelu, not {activation}.") def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x # from flash_attn import flash_attn_qkvpacked_func, flash_attn_func # class FlashSelfAttention(nn.Module): # def __init__(self, dim,num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): # super().__init__() # self.dim = dim # self.num_heads = num_heads # head_dim = dim // num_heads # self.scale = qk_scale or head_dim ** -0.5 # # self.in_proj = nn.Linear(dim, 3*dim, bias=qkv_bias) # self.in_proj_weight = nn.Parameter(torch.empty((3 * dim,dim))) # if qkv_bias: # self.in_proj_bias = nn.Parameter(torch.empty(3 * dim)) # else: # self.register_parameter('in_proj_bias', None) # self.attn_drop = nn.Dropout(attn_drop) # self.out_proj = nn.Linear(dim, dim) # self.proj_drop = nn.Dropout(proj_drop) # def forward(self,x): # """ # x: B,N,C # """ # B_, N, C = x.shape # qkv=F.linear(x, self.in_proj_weight , self.in_proj_bias).reshape(B_,N,3,self.num_heads,-1) # x=flash_attn_qkvpacked_func(qkv,self.attn_drop.p if self.training else 0.0,softmax_scale=self.scale).reshape(B_,N,-1) # x=self.out_proj(x) # x=self.proj_drop(x) # return x ================================================ FILE: diffusion/models/diffusionnet.py ================================================ import math import random from typing import List from collections import namedtuple import torch import torch.nn.functional as F from torch import nn from yolox.models.yolo_pafpn import YOLOPAFPN from .diffusion_head import DiffusionHead from yolox.models.network_blocks import BaseConv class DiffusionNet(nn.Module): """ Implement DiffusionNet """ def __init__(self, backbone=None, head=None, act="silu"): super().__init__() self.backbone=backbone self.head=head self.projs=nn.ModuleList() in_channels=backbone.in_channels for i in range(len(in_channels)): self.projs.append( BaseConv( in_channels=int(in_channels[i] * head.width), out_channels=int(head.hidden_dim), ksize=1, stride=1, act=act, )) def forward(self, x, targets=(None,None),random_flip=False,input_size=None): # fpn output content features of [dark3, dark4, dark5] # x format (pre_imgs,cur_imgs) (B,C,H,W) # targets format (pre_targets,cur_targets) (B,N,5) class cx cy w h pre_imgs,cur_imgs=x pre_targets,cur_targets=targets mate_info=(pre_imgs.shape,pre_imgs.device,pre_imgs.dtype) bs,_,_,_=mate_info[0] if cur_imgs is None: x_input=pre_imgs else: x_input=torch.cat([pre_imgs,cur_imgs],dim=0) fpn_outs = self.backbone(x_input) flip_mode=False if random_flip and torch.randn((1,1))[0]>0.5: flip_mode=True pre_features,cur_features=[],[] for proj,x_out in zip(self.projs,fpn_outs): l_feat=proj(x_out) if cur_imgs is None: pre_features.append(l_feat) if flip_mode: cur_features.append(torch.flip(l_feat,dims=[3])) else: cur_features.append(l_feat.clone()) else: pre_l_feat,cur_l_feat=l_feat.split(bs,dim=0) pre_features.append(pre_l_feat) cur_features.append(cur_l_feat) features=(pre_features,cur_features) if self.training: assert pre_targets is not None if cur_targets is None: cur_targets=pre_targets.clone() if flip_mode: nlabels=(cur_targets.sum(-1)>0).sum(-1) for idx,nlabel in enumerate(nlabels): cur_targets[idx,:nlabel,1]=input_size[1]-cur_targets[idx,:nlabel,1] loss_dict = self.head( features,mate_info,targets=torch.cat([pre_targets,cur_targets],dim=0)) if 'total_loss' not in loss_dict: loss_dict['total_loss']=sum(loss_dict.values()) outputs=loss_dict return outputs else: outputs = self.head(features,mate_info,targets=pre_targets) return outputs ================================================ FILE: exps/default/nano.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os import torch.nn as nn from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 0.33 self.width = 0.25 self.scale = (0.5, 1.5) self.random_size = (10, 20) self.test_size = (416, 416) self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.enable_mixup = False def get_model(self, sublinear=False): def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if "model" not in self.__dict__: from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead in_channels = [256, 512, 1024] # NANO model use depthwise = True, which is main difference. backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, depthwise=True) head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, depthwise=True) self.model = YOLOX(backbone, head) self.model.apply(init_yolo) self.model.head.initialize_biases(1e-2) return self.model ================================================ FILE: exps/default/yolov3.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os import torch import torch.nn as nn from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 1.0 self.width = 1.0 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] def get_model(self, sublinear=False): def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if "model" not in self.__dict__: from yolox.models import YOLOX, YOLOFPN, YOLOXHead backbone = YOLOFPN() head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act="lrelu") self.model = YOLOX(backbone, head) self.model.apply(init_yolo) self.model.head.initialize_biases(1e-2) return self.model def get_data_loader(self, batch_size, is_distributed, no_aug=False): from data.datasets.cocodataset import COCODataset from data.datasets.mosaicdetection import MosaicDetection from data.datasets.data_augment import TrainTransform from data.datasets.dataloading import YoloBatchSampler, DataLoader, InfiniteSampler import torch.distributed as dist dataset = COCODataset( data_dir='data/COCO/', json_file=self.train_ann, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=50 ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=120 ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0) else: sampler = torch.utils.data.RandomSampler(self.dataset) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader ================================================ FILE: exps/default/yolox_l.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 1.0 self.width = 1.0 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] ================================================ FILE: exps/default/yolox_m.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 0.67 self.width = 0.75 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] ================================================ FILE: exps/default/yolox_s.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 0.33 self.width = 0.50 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] ================================================ FILE: exps/default/yolox_tiny.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 0.33 self.width = 0.375 self.scale = (0.5, 1.5) self.random_size = (10, 20) self.test_size = (416, 416) self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.enable_mixup = False ================================================ FILE: exps/default/yolox_x.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import os from yolox.exp import Exp as MyExp class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] ================================================ FILE: exps/example/mot/yolox_x_diffusion_det_dancetrack.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "train.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (18, 32) self.max_epoch = 20 self.print_interval = 20 self.eval_interval = 40 self.no_aug_epochs = 5 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="detection" self.enable_mixup = True self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_det_mot17.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "val_half.json" self.input_size = (800, 1440) self.test_size = (800, 1440) self.random_size = (18, 32) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="detection" self.enable_mixup = True self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mix_det"), json_file=self.train_ann, name='', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train_half.json" self.val_ann = "val_half.json" self.input_size = (800, 1440) self.test_size = (800, 1440) self.random_size = (18, 32) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="detection" self.enable_mixup = True self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mix_mot_ch"), json_file=self.train_ann, name='', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_det_mot20.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "val_half.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (20, 36) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="detection" self.enable_mixup = True self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mix_mot20_ch"), json_file=self.train_ann, name='', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "MOT20"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "train.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (18, 32) self.max_epoch = 20 self.print_interval = 20 self.eval_interval = 40 self.no_aug_epochs = 5 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "test.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (18, 32) self.max_epoch = 20 self.print_interval = 20 self.eval_interval = 40 self.no_aug_epochs = 5 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "dancetrack"), json_file=self.val_ann, img_size=self.test_size, name='test', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_mot17.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "val_half.json" self.input_size = (800, 1440) self.test_size = (800, 1440) self.random_size = (18, 32) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train_half.json" self.val_ann = "val_half.json" self.input_size = (800, 1440) self.test_size = (800, 1440) self.random_size = (18, 32) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.enable_mixup = True self.seed=8823 self.conf_thresh=0.25 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_mot17_baseline.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "test.json" self.input_size = (800, 1440) self.test_size = (800, 1440) self.random_size = (18, 32) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "mot"), json_file=self.val_ann, img_size=self.test_size, name='test', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1000, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_mot20.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "val_half.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (20, 36) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "MOT20"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "MOT20"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: exps/example/mot/yolox_x_diffusion_track_mot20_baseline.py ================================================ # encoding: utf-8 import os import random import torch import torch.nn as nn import torch.distributed as dist from torch.optim import AdamW from yolox.exp import Exp as MyExp from yolox.data import get_yolox_datadir class Exp(MyExp): def __init__(self): super(Exp, self).__init__() self.num_classes = 1 self.depth = 1.33 self.width = 1.25 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] self.train_ann = "train.json" self.val_ann = "val_half.json" self.input_size = (896, 1600) self.test_size = (896, 1600) self.random_size = (20, 36) self.max_epoch = 30 self.print_interval = 20 self.eval_interval = 5 self.no_aug_epochs = 10 self.basic_lr_per_img = 0.001 / 64.0 self.warmup_epochs = 1 self.task="tracking" self.seed=8823 self.conf_thresh=0.4 self.det_thresh=0.7 self.nms_thresh2d=0.75 self.nms_thresh3d=0.7 self.interval=5 def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( MOTDataset, TrainTransform, YoloBatchSampler, DataLoader, InfiniteSampler, MosaicDetection, DiffusionMosaicDetection, DiffusionTrainTransform ) dataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "MOT20"), json_file=self.train_ann, name='train', img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=500, ), ) dataset = DiffusionMosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=DiffusionTrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler( len(self.dataset), seed=self.seed if self.seed else 0 ) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import MOTDataset,DiffusionValTransform valdataset = MOTDataset( data_dir=os.path.join(get_yolox_datadir(), "MOT20"), json_file=self.val_ann, img_size=self.test_size, name='train', preproc=DiffusionValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=1200, ) ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.conf_thresh, nmsthre3d=self.nms_thresh3d, detthre=self.det_thresh, nmsthre2d=self.nms_thresh2d, num_classes=self.num_classes, testdev=testdev, ) return evaluator def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) for value in backbone.parameters(): value.requires_grad=False head=DiffusionHead(self.num_classes,self.width) self.model = DiffusionNet(backbone, head) self.model.apply(init_yolo) # self.model.head.initialize_biases(1e-2) return self.model def get_optimizer(self, batch_size): lr=2.5e-05 weight_decay = 0.0001 self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) return self.optimizer ================================================ FILE: requirements.txt ================================================ numpy torch>=1.7 opencv_python loguru scikit-image tqdm torchvision>=0.10.0 Pillow thop ninja tabulate tensorboard lap motmetrics filterpy h5py ================================================ FILE: setup.py ================================================ #!/usr/bin/env python # Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved import re import setuptools import glob from os import path import torch from torch.utils.cpp_extension import CppExtension torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3" def get_extensions(): this_dir = path.dirname(path.abspath(__file__)) extensions_dir = path.join(this_dir, "yolox", "layers", "csrc") main_source = path.join(extensions_dir, "vision.cpp") sources = glob.glob(path.join(extensions_dir, "**", "*.cpp")) sources = [main_source] + sources extension = CppExtension extra_compile_args = {"cxx": ["-O3"]} define_macros = [] include_dirs = [extensions_dir] ext_modules = [ extension( "yolox._C", sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, ) ] return ext_modules with open("yolox/__init__.py", "r") as f: version = re.search( r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE ).group(1) # with open("README.md", "r") as f: # long_description = f.read() long_description="sss" setuptools.setup( name="yolox", version=version, author="basedet team", python_requires=">=3.6", long_description=long_description, ext_modules=get_extensions(), classifiers=["Programming Language :: Python :: 3", "Operating System :: OS Independent"], cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension}, packages=setuptools.find_namespace_packages(), ) ================================================ FILE: tools/convert_bdd100k_to_coco.py ================================================ import cv2 import os import json import tqdm import numpy as np labels_path = 'datasets/bdd100k/labels' img_path = 'datasets/bdd100k/images' # mot_labels_path = '/data/yourname/BDD100K-MOT/GT' out_path = 'datasets/bdd100k/annotations/' split = ['train'] categories = [ {"id": 1, "name": "pedestrian"}, {"id": 2, "name": "rider"}, {"id": 3, "name": "car"}, {"id": 4, "name": "truck"}, {"id": 5, "name": "bus"}, {"id": 6, "name": "train"}, {"id": 7, "name": "motorcycle"}, {"id": 8, "name": "bicycle"}, # {"id": 9, "name": "traffic light"}, # {"id": 10, "name": "traffic sign"}, ] # "traffic light":9, "traffic sign":10 cat = {"pedestrian":1, "rider":2, "car":3, "truck":4, "bus":5, "train":6, "motorcycle":7, "bicycle":8,} # 1: pedestrian # 2: rider # 3: car # 4: truck # 5: bus # 6: train # 7: motorcycle # 8: bicycle # 9: traffic light --- Don't need tracking # 10: traffic sign --- Don't need tracking # For MOT and MOTS, only the first 8 classes are used and evaluated def read_tid_num_per_video(video_ann_dir): anns = np.loadtxt(video_ann_dir, dtype=np.float32, delimiter=',') max_tid = max(anns[:, 1]) return int(max_tid) for s in split: img_id = 1; ann_id = 1; video_cnt = 0; tid_cnt = 0 images = []; annotations=[]; videos = [] all_video=[d for d in os.listdir(os.path.join(labels_path, s)) if '.json' in d] need_index=np.random.choice(range(len(all_video)),len(all_video)//3,replace=False) video_labels_list = [all_video[i] for i in need_index] for v_label in tqdm.tqdm(video_labels_list): video_cnt += 1 video = {'id': video_cnt, 'file_name':v_label[:-5]} videos.append(video) v_lab_path = os.path.join(os.path.join(labels_path, s, v_label)) with open(v_lab_path, 'r') as f: annos=json.load(f)# anns per video num_frames = len(annos)# the number of frames per video sign_cnt = 0 for ann in annos:# ann --- 每一帧的标注信息,这里放过了空白帧 img_name = os.path.join(img_path, s, ann['videoName'], ann['name']) img=cv2.imread(img_name) h,w,_ = img.shape img_info = { 'file_name':img_name, 'width':w, 'height':h, 'id': img_id, 'frame_id': ann['frameIndex'] + 1,# 严格按照 数据集 标记的帧indx 来进行排序,这将有利于 判断 相邻帧 之间的关系 'prev_image_id': -1 if ann['frameIndex'] == 0 else img_id - 1, 'next_image_id': -1 if ann['frameIndex'] == num_frames-1 else img_id + 1, 'video_id': video_cnt }# 所有的图像信息images中 ,这里也会添加空白标注帧的图像信息 images.append(img_info) for j, lab in enumerate(ann['labels']): # lab---每一个实例的标注信息 如果遇到空白标注帧--ann['labels']为空 则循环不执行 如果帧为非空 则继续执行此循环 if lab['category'] in cat:# 为了避免 'other vehicle' 类 pass else: continue track_id = lab['id'] if sign_cnt == 0 and j==0: firstid = track_id sign_cnt = 1 tid_curr = int(track_id) - int(firstid) + 1 tid_cnt+=1 is_crowd = lab['attributes']['crowd'] x1, y1, x2, y2=lab['box2d']['x1'], lab['box2d']['y1'], lab['box2d']['x2'], lab['box2d']['y2'] annotation = { 'image_id': img_id, 'conf': 1, 'bbox': [x1, y1, x2-x1, y2-y1], 'category_id': cat[lab['category']], 'id': ann_id, 'iscrowd': 1 if is_crowd else 0, 'track_id': tid_curr + tid_cnt, 'segmentation': [], 'area': (x2-x1)*(y2-y1), 'box_id':int(track_id) } annotations.append(annotation) ann_id += 1 img_id += 1 # tid_cnt += read_tid_num_per_video(os.path.join(mot_labels_path, s, v_label[:-5]+'.txt')) dataset_dict = {} dataset_dict["images"] = images dataset_dict["annotations"] = annotations dataset_dict["categories"] = categories dataset_dict["videos"] = videos json_str = json.dumps(dataset_dict) print(f' The number of detection objects is {ann_id - 1}, The number of detection imgs is {img_id -1} .') with open(out_path+f'{s}.json', 'w') as json_file: json_file.write(json_str) ================================================ FILE: tools/convert_cityperson_to_coco.py ================================================ import os import numpy as np import json from PIL import Image DATA_PATH = 'datasets/Cityscapes/' DATA_FILE_PATH = 'datasets/data_path/citypersons.train' OUT_PATH = DATA_PATH + 'annotations/' def load_paths(data_path): with open(data_path, 'r') as file: img_files = file.readlines() img_files = [x.replace('\n', '') for x in img_files] img_files = list(filter(lambda x: len(x) > 0, img_files)) label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files] return img_files, label_files if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.mkdir(OUT_PATH) out_path = OUT_PATH + 'train.json' out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]} img_paths, label_paths = load_paths(DATA_FILE_PATH) image_cnt = 0 ann_cnt = 0 video_cnt = 0 for img_path, label_path in zip(img_paths, label_paths): image_cnt += 1 im = Image.open(os.path.join("datasets", img_path)) image_info = {'file_name': img_path, 'id': image_cnt, 'height': im.size[1], 'width': im.size[0]} out['images'].append(image_info) # Load labels if os.path.isfile(os.path.join("datasets", label_path)): labels0 = np.loadtxt(os.path.join("datasets", label_path), dtype=np.float32).reshape(-1, 6) # Normalized xywh to pixel xyxy format labels = labels0.copy() labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2) labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2) labels[:, 4] = image_info['width'] * labels0[:, 4] labels[:, 5] = image_info['height'] * labels0[:, 5] else: labels = np.array([]) for i in range(len(labels)): ann_cnt += 1 fbox = labels[i, 2:6].tolist() ann = {'id': ann_cnt, 'category_id': 1, 'image_id': image_cnt, 'track_id': -1, 'bbox': fbox, 'area': fbox[2] * fbox[3], 'iscrowd': 0} out['annotations'].append(ann) print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_crowdhuman_to_coco.py ================================================ import os import numpy as np import json from PIL import Image DATA_PATH = 'datasets/crowdhuman/' OUT_PATH = DATA_PATH + 'annotations/' SPLITS = ['val', 'train'] DEBUG = False def load_func(fpath): print('fpath', fpath) assert os.path.exists(fpath) with open(fpath,'r') as fid: lines = fid.readlines() records =[json.loads(line.strip('\n')) for line in lines] return records if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.mkdir(OUT_PATH) for split in SPLITS: data_path = DATA_PATH + split out_path = OUT_PATH + '{}.json'.format(split) out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]} ann_path = DATA_PATH + 'annotation_{}.odgt'.format(split) anns_data = load_func(ann_path) image_cnt = 0 ann_cnt = 0 video_cnt = 0 for ann_data in anns_data: image_cnt += 1 file_path = DATA_PATH + 'CrowdHuman_{}/Images/'.format(split) + '{}.jpg'.format(ann_data['ID']) im = Image.open(file_path) image_info = {'file_name': '{}.jpg'.format(ann_data['ID']), 'id': image_cnt, 'height': im.size[1], 'width': im.size[0]} out['images'].append(image_info) if split != 'test': anns = ann_data['gtboxes'] for i in range(len(anns)): ann_cnt += 1 fbox = anns[i]['fbox'] ann = {'id': ann_cnt, 'category_id': 1, 'image_id': image_cnt, 'track_id': -1, 'bbox_vis': anns[i]['vbox'], 'bbox': fbox, 'area': fbox[2] * fbox[3], 'iscrowd': 1 if 'extra' in anns[i] and \ 'ignore' in anns[i]['extra'] and \ anns[i]['extra']['ignore'] == 1 else 0} out['annotations'].append(ann) print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_dancetrack_to_coco.py ================================================ import os import numpy as np import json import cv2 # Use the same script for MOT16 DATA_PATH = 'datasets/dancetrack' OUT_PATH = os.path.join(DATA_PATH, 'annotations') SPLITS = ['train','test'] # --> split training data to train_half and val_half. HALF_VIDEO = True CREATE_SPLITTED_ANN = True CREATE_SPLITTED_DET = True if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.makedirs(OUT_PATH) for split in SPLITS: if split == "test": data_path = os.path.join(DATA_PATH, 'test') else: data_path = os.path.join(DATA_PATH, 'train') out_path = os.path.join(OUT_PATH, '{}.json'.format(split)) out = {'images': [], 'annotations': [], 'videos': [], 'categories': [{'id': 1, 'name': 'pedestrian'}]} seqs = os.listdir(data_path) image_cnt = 0 ann_cnt = 0 video_cnt = 0 tid_curr = 0 tid_last = -1 for seq in sorted(seqs): if '.DS_Store' in seq: continue if 'mot' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)): continue video_cnt += 1 # video sequence number. out['videos'].append({'id': video_cnt, 'file_name': seq}) seq_path = os.path.join(data_path, seq) img_path = os.path.join(seq_path, 'img1') ann_path = os.path.join(seq_path, 'gt/gt.txt') images = os.listdir(img_path) num_images = len([image for image in images if 'jpg' in image]) # half and half if HALF_VIDEO and ('half' in split): image_range = [0, num_images // 2] if 'train' in split else \ [num_images // 2 + 1, num_images - 1] else: image_range = [0, num_images - 1] for i in range(num_images): if i < image_range[0] or i > image_range[1]: continue img = cv2.imread(os.path.join(data_path, '{}/img1/{:08d}.jpg'.format(seq, i + 1))) height, width = img.shape[:2] image_info = {'file_name': '{}/img1/{:08d}.jpg'.format(seq, i + 1), # image name. 'id': image_cnt + i + 1, # image number in the entire training set. 'frame_id': i + 1 - image_range[0], # image number in the video sequence, starting from 1. 'prev_image_id': image_cnt + i if i > 0 else -1, # image number in the entire training set. 'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1, 'video_id': video_cnt, 'height': height, 'width': width} out['images'].append(image_info) print('{}: {} images'.format(seq, num_images)) if split != 'test': det_path = os.path.join(seq_path, 'det/det.txt') anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',') sorted_index=np.argsort(anns[:,1]) anns=anns[sorted_index] if ('half' in split): dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',') if CREATE_SPLITTED_ANN and ('half' in split): anns_out = np.array([anns[i] for i in range(anns.shape[0]) if int(anns[i][0]) - 1 >= image_range[0] and int(anns[i][0]) - 1 <= image_range[1]], np.float32) anns_out[:, 0] -= image_range[0] gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split)) fout = open(gt_out, 'w') for o in anns_out: fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\n'.format( int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]), int(o[6]), int(o[7]), o[8])) fout.close() if CREATE_SPLITTED_DET and ('half' in split): dets_out = np.array([dets[i] for i in range(dets.shape[0]) if int(dets[i][0]) - 1 >= image_range[0] and int(dets[i][0]) - 1 <= image_range[1]], np.float32) dets_out[:, 0] -= image_range[0] det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split)) dout = open(det_out, 'w') for o in dets_out: dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\n'.format( int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]), float(o[6]))) dout.close() print('{} ann images'.format(int(anns[:, 0].max()))) for i in range(anns.shape[0]): frame_id = int(anns[i][0]) if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]: continue track_id = int(anns[i][1]) # cat_id = int(anns[i][7]) ann_cnt += 1 category_id = 1 # pedestrian(non-static) if not track_id == tid_last: tid_curr += 1 tid_last = track_id ann = {'id': ann_cnt, 'category_id': category_id, 'image_id': image_cnt + frame_id, 'track_id': tid_curr, 'bbox': anns[i][2:6].tolist(), 'conf': 1, 'iscrowd': 0, 'area': float(anns[i][4] * anns[i][5])} out['annotations'].append(ann) image_cnt += num_images print(tid_curr, tid_last) print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_ethz_to_coco.py ================================================ import os import numpy as np import json from PIL import Image DATA_PATH = 'datasets/ETHZ/' DATA_FILE_PATH = 'datasets/data_path/eth.train' OUT_PATH = DATA_PATH + 'annotations/' def load_paths(data_path): with open(data_path, 'r') as file: img_files = file.readlines() img_files = [x.replace('\n', '') for x in img_files] img_files = list(filter(lambda x: len(x) > 0, img_files)) label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files] return img_files, label_files if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.mkdir(OUT_PATH) out_path = OUT_PATH + 'train.json' out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]} img_paths, label_paths = load_paths(DATA_FILE_PATH) image_cnt = 0 ann_cnt = 0 video_cnt = 0 for img_path, label_path in zip(img_paths, label_paths): image_cnt += 1 im = Image.open(os.path.join("datasets", img_path)) image_info = {'file_name': img_path, 'id': image_cnt, 'height': im.size[1], 'width': im.size[0]} out['images'].append(image_info) # Load labels if os.path.isfile(os.path.join("datasets", label_path)): labels0 = np.loadtxt(os.path.join("datasets", label_path), dtype=np.float32).reshape(-1, 6) # Normalized xywh to pixel xyxy format labels = labels0.copy() labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2) labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2) labels[:, 4] = image_info['width'] * labels0[:, 4] labels[:, 5] = image_info['height'] * labels0[:, 5] else: labels = np.array([]) for i in range(len(labels)): ann_cnt += 1 fbox = labels[i, 2:6].tolist() ann = {'id': ann_cnt, 'category_id': 1, 'image_id': image_cnt, 'track_id': -1, 'bbox': fbox, 'area': fbox[2] * fbox[3], 'iscrowd': 0} out['annotations'].append(ann) print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_kitti_to_coco.py ================================================ from __future__ import absolute_import from __future__ import division from __future__ import print_function import pickle import json import numpy as np import os import cv2 DATA_PATH = 'datasets/KITTI/' OUT_PATH = 'datasets/KITTI/annotations' SPLITS = ['train'] VIDEO_SETS = {'train': range(21), 'test': range(29), 'train_half': range(21), 'val_half': range(21)} CREATE_HALF_LABEL = True DEBUG = False ''' #Values Name Description ---------------------------------------------------------------------------- 1 frame Frame within the sequence where the object appearers 1 track id Unique tracking id of this object within this sequence 1 type Describes the type of object: 'Car', 'Van', 'Truck', 'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram', 'Misc' or 'DontCare' 1 truncated Integer (0,1,2) indicating the level of truncation. Note that this is in contrast to the object detection benchmark where truncation is a float in [0,1]. 1 occluded Integer (0,1,2,3) indicating occlusion state: 0 = fully visible, 1 = partly occluded 2 = largely occluded, 3 = unknown 1 alpha Observation angle of object, ranging [-pi..pi] 4 bbox 2D bounding box of object in the image (0-based index): contains left, top, right, bottom pixel coordinates 3 dimensions 3D object dimensions: height, width, length (in meters) 3 location 3D object location x,y,z in camera coordinates (in meters) 1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi] 1 score Only for results: Float, indicating confidence in detection, needed for p/r curves, higher is better. ''' def project_to_image(pts_3d, P): # pts_3d: n x 3 # P: 3 x 4 # return: n x 2 pts_3d_homo = np.concatenate( [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1) pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0) pts_2d = pts_2d[:, :2] / pts_2d[:, 2:] return pts_2d def read_clib(calib_path): f = open(calib_path, 'r') for i, line in enumerate(f): if i == 2: calib = np.array(line.strip().split(' ')[1:], dtype=np.float32) calib = calib.reshape(3, 4) return calib def _bbox_to_coco_bbox(bbox): return [(bbox[0]), (bbox[1]), (bbox[2] - bbox[0]), (bbox[3] - bbox[1])] cats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting', 'Tram', 'Misc', 'DontCare'] cat_ids = {cat: i + 1 for i, cat in enumerate(cats)} cat_ids['Person'] = cat_ids['Person_sitting'] cat_info = [] for i, cat in enumerate(['pedestrian', 'car']): cat_info.append({'name': cat, 'id': i + 1}) if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.mkdir(OUT_PATH) for split in SPLITS: ann_dir = DATA_PATH + '/label_02/' ret = {'images': [], 'annotations': [], "categories": cat_info, 'videos': []} num_images = 0 for i in VIDEO_SETS[split]: image_id_base = num_images video_name = '{:04d}'.format(i) ret['videos'].append({'id': i + 1, 'file_name': video_name}) ann_dir = 'train' if not ('test' in split) else split video_path = DATA_PATH + \ '{}ing/image_02/{}'.format(ann_dir, video_name) #calib_path = DATA_PATH + 'data_tracking_calib/{}ing/calib/'.format(ann_dir) \ # + '{}.txt'.format(video_name) #calib = read_clib(calib_path) image_files = sorted(os.listdir(video_path)) num_images_video = len(image_files) if CREATE_HALF_LABEL and 'half' in split: image_range = [0, num_images_video // 2 - 1] if split == 'train_half' else \ [num_images_video // 2, num_images_video - 1] else: image_range = [0, num_images_video - 1] print('num_frames', video_name, image_range[1] - image_range[0] + 1) for j, image_name in enumerate(image_files): if (j < image_range[0] or j > image_range[1]): continue num_images += 1 filen_name='training/image_02/{}/{:06d}.png'.format(video_name, j) data_path = os.path.join(DATA_PATH,filen_name) img = cv2.imread(data_path) height, width = img.shape[:2] image_info = {'file_name': filen_name, 'id': num_images, #'calib': calib.tolist(), 'video_id': i + 1, 'frame_id': j + 1 - image_range[0], 'prev_image_id': num_images-1, 'next_image_id': num_images+1, 'height': height, 'width': width} ret['images'].append(image_info) if split == 'test': continue # 0 -1 DontCare -1 -1 -10.000000 219.310000 188.490000 245.500000 218.560000 -1000.000000 -1000.000000 -1000.000000 -10.000000 -1.000000 -1.000000 -1.000000 ann_path = DATA_PATH + 'training/label_02/{}.txt'.format(video_name) anns = open(ann_path, 'r') if CREATE_HALF_LABEL and 'half' in split: label_out_folder = DATA_PATH + 'label_02_{}/'.format(split) label_out_path = label_out_folder + '{}.txt'.format(video_name) if not os.path.exists(label_out_folder): os.mkdir(label_out_folder) label_out_file = open(label_out_path, 'w') for ann_ind, txt in enumerate(anns): tmp = txt[:-1].split(' ') frame_id = int(tmp[0]) track_id = int(tmp[1]) cat_id = cat_ids[tmp[2]] # fillter person and car if cat_id not in [1,2]: continue truncated = int(float(tmp[3])) occluded = int(tmp[4]) alpha = float(tmp[5]) bbox = [float(tmp[6]), float(tmp[7]), float(tmp[8]), float(tmp[9])] dim = [float(tmp[10]), float(tmp[11]), float(tmp[12])] location = [float(tmp[13]), float(tmp[14]), float(tmp[15])] rotation_y = float(tmp[16]) #amodel_center = project_to_image( # np.array([location[0], location[1] - dim[0] / 2, location[2]], # np.float32).reshape(1, 3), calib)[0].tolist() ann = {'image_id': frame_id + 1 - image_range[0] + image_id_base, 'id': int(len(ret['annotations']) + 1), 'category_id': cat_id, 'dim': dim, 'bbox': _bbox_to_coco_bbox(bbox), 'depth': location[2], 'alpha': alpha, 'truncated': truncated, 'occluded': occluded, 'location': location, 'rotation_y': rotation_y, 'iscrowd':0, 'area': (bbox[2] - bbox[0])*(bbox[3] - bbox[1]), 'conf':1.0, #'amodel_center': amodel_center, 'track_id': track_id + 1, 'box_id': int(track_id + 1)} if CREATE_HALF_LABEL and 'half' in split: if (frame_id < image_range[0] or frame_id > image_range[1]): continue out_frame_id = frame_id - image_range[0] label_out_file.write('{} {}'.format( out_frame_id, txt[txt.find(' ') + 1:])) ret['annotations'].append(ann) print("# images: ", len(ret['images'])) print("# annotations: ", len(ret['annotations'])) out_path = '{}/{}.json'.format( OUT_PATH, split) json.dump(ret, open(out_path, 'w')) ================================================ FILE: tools/convert_mot17_to_coco.py ================================================ import os import numpy as np import json import cv2 # Use the same script for MOT16 DATA_PATH = 'datasets/mot' OUT_PATH = os.path.join(DATA_PATH, 'annotations') SPLITS = ['val_half','train_half',"train","test"] # --> split training data to train_half and val_half. HALF_VIDEO = True CREATE_SPLITTED_ANN = True CREATE_SPLITTED_DET = True if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.makedirs(OUT_PATH) for split in SPLITS: if split == "test": data_path = os.path.join(DATA_PATH, 'test') else: data_path = os.path.join(DATA_PATH, 'train') out_path = os.path.join(OUT_PATH, '{}.json'.format(split)) out = {'images': [], 'annotations': [], 'videos': [], 'categories': [{'id': 1, 'name': 'pedestrian'}]} seqs = os.listdir(data_path) image_cnt = 0 ann_cnt = 0 video_cnt = 0 tid_curr = 0 tid_last = -1 for seq in sorted(seqs): if '.DS_Store' in seq: continue if 'mot' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)): continue video_cnt += 1 # video sequence number. out['videos'].append({'id': video_cnt, 'file_name': seq}) seq_path = os.path.join(data_path, seq) img_path = os.path.join(seq_path, 'img1') ann_path = os.path.join(seq_path, 'gt/gt.txt') images = os.listdir(img_path) num_images = len([image for image in images if 'jpg' in image]) # half and half if HALF_VIDEO and ('half' in split): image_range = [0, num_images // 2] if 'train' in split else \ [num_images // 2 + 1, num_images - 1] else: image_range = [0, num_images - 1] for i in range(num_images): if i < image_range[0] or i > image_range[1]: continue img = cv2.imread(os.path.join(data_path, '{}/img1/{:06d}.jpg'.format(seq, i + 1))) height, width = img.shape[:2] image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1), # image name. 'id': image_cnt + i + 1, # image number in the entire training set. 'frame_id': i + 1 - image_range[0], # image number in the video sequence, starting from 1. 'prev_image_id': image_cnt + i if i > 0 else -1, # image number in the entire training set. 'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1, 'video_id': video_cnt, 'height': height, 'width': width} out['images'].append(image_info) print('{}: {} images'.format(seq, num_images)) if split != 'test': det_path = os.path.join(seq_path, 'det/det.txt') anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',') dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',') if CREATE_SPLITTED_ANN and ('half' in split): anns_out = np.array([anns[i] for i in range(anns.shape[0]) if int(anns[i][0]) - 1 >= image_range[0] and int(anns[i][0]) - 1 <= image_range[1]], np.float32) anns_out[:, 0] -= image_range[0] gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split)) fout = open(gt_out, 'w') for o in anns_out: fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\n'.format( int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]), int(o[6]), int(o[7]), o[8])) fout.close() if CREATE_SPLITTED_DET and ('half' in split): dets_out = np.array([dets[i] for i in range(dets.shape[0]) if int(dets[i][0]) - 1 >= image_range[0] and int(dets[i][0]) - 1 <= image_range[1]], np.float32) dets_out[:, 0] -= image_range[0] det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split)) dout = open(det_out, 'w') for o in dets_out: dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\n'.format( int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]), float(o[6]))) dout.close() print('{} ann images'.format(int(anns[:, 0].max()))) for i in range(anns.shape[0]): frame_id = int(anns[i][0]) if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]: continue track_id = int(anns[i][1]) cat_id = int(anns[i][7]) ann_cnt += 1 if not ('15' in DATA_PATH): #if not (float(anns[i][8]) >= 0.25): # visibility. #continue if not (int(anns[i][6]) == 1): # whether ignore. continue if int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]: # Non-person continue if int(anns[i][7]) in [2, 7, 8, 12]: # Ignored person category_id = -1 else: category_id = 1 # pedestrian(non-static) if not track_id == tid_last: tid_curr += 1 tid_last = track_id else: category_id = 1 ann = {'id': ann_cnt, 'category_id': category_id, 'image_id': image_cnt + frame_id, 'track_id': tid_curr, 'bbox': anns[i][2:6].tolist(), 'conf': float(anns[i][6]), 'iscrowd': 0, 'area': float(anns[i][4] * anns[i][5])} out['annotations'].append(ann) image_cnt += num_images print(tid_curr, tid_last) print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_mot20_to_coco.py ================================================ import os import numpy as np import json import cv2 # Use the same script for MOT16 DATA_PATH = 'datasets/MOT20' OUT_PATH = os.path.join(DATA_PATH, 'annotations') SPLITS = ['train_half', 'val_half', 'train', 'test'] # --> split training data to train_half and val_half. HALF_VIDEO = True CREATE_SPLITTED_ANN = True CREATE_SPLITTED_DET = True if __name__ == '__main__': if not os.path.exists(OUT_PATH): os.makedirs(OUT_PATH) for split in SPLITS: if split == "test": data_path = os.path.join(DATA_PATH, 'test') else: data_path = os.path.join(DATA_PATH, 'train') out_path = os.path.join(OUT_PATH, '{}.json'.format(split)) out = {'images': [], 'annotations': [], 'videos': [], 'categories': [{'id': 1, 'name': 'pedestrian'}]} seqs = os.listdir(data_path) image_cnt = 0 ann_cnt = 0 video_cnt = 0 tid_curr = 0 tid_last = -1 for seq in sorted(seqs): if '.DS_Store' in seq: continue video_cnt += 1 # video sequence number. out['videos'].append({'id': video_cnt, 'file_name': seq}) seq_path = os.path.join(data_path, seq) img_path = os.path.join(seq_path, 'img1') ann_path = os.path.join(seq_path, 'gt/gt.txt') images = os.listdir(img_path) num_images = len([image for image in images if 'jpg' in image]) # half and half if HALF_VIDEO and ('half' in split): image_range = [0, num_images // 2] if 'train' in split else \ [num_images // 2 + 1, num_images - 1] else: image_range = [0, num_images - 1] for i in range(num_images): if i < image_range[0] or i > image_range[1]: continue img = cv2.imread(os.path.join(data_path, '{}/img1/{:06d}.jpg'.format(seq, i + 1))) height, width = img.shape[:2] image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1), # image name. 'id': image_cnt + i + 1, # image number in the entire training set. 'frame_id': i + 1 - image_range[0], # image number in the video sequence, starting from 1. 'prev_image_id': image_cnt + i if i > 0 else -1, # image number in the entire training set. 'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1, 'video_id': video_cnt, 'height': height, 'width': width} out['images'].append(image_info) print('{}: {} images'.format(seq, num_images)) if split != 'test': det_path = os.path.join(seq_path, 'det/det.txt') anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',') dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',') if CREATE_SPLITTED_ANN and ('half' in split): anns_out = np.array([anns[i] for i in range(anns.shape[0]) if int(anns[i][0]) - 1 >= image_range[0] and int(anns[i][0]) - 1 <= image_range[1]], np.float32) anns_out[:, 0] -= image_range[0] gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split)) fout = open(gt_out, 'w') for o in anns_out: fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\n'.format( int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]), int(o[6]), int(o[7]), o[8])) fout.close() if CREATE_SPLITTED_DET and ('half' in split): dets_out = np.array([dets[i] for i in range(dets.shape[0]) if int(dets[i][0]) - 1 >= image_range[0] and int(dets[i][0]) - 1 <= image_range[1]], np.float32) dets_out[:, 0] -= image_range[0] det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split)) dout = open(det_out, 'w') for o in dets_out: dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\n'.format( int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]), float(o[6]))) dout.close() print('{} ann images'.format(int(anns[:, 0].max()))) for i in range(anns.shape[0]): frame_id = int(anns[i][0]) if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]: continue track_id = int(anns[i][1]) cat_id = int(anns[i][7]) ann_cnt += 1 if not ('15' in DATA_PATH): #if not (float(anns[i][8]) >= 0.25): # visibility. #continue if not (int(anns[i][6]) == 1): # whether ignore. continue if int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]: # Non-person continue if int(anns[i][7]) in [2, 7, 8, 12]: # Ignored person #category_id = -1 continue else: category_id = 1 # pedestrian(non-static) if not track_id == tid_last: tid_curr += 1 tid_last = track_id else: category_id = 1 ann = {'id': ann_cnt, 'category_id': category_id, 'image_id': image_cnt + frame_id, 'track_id': tid_curr, 'bbox': anns[i][2:6].tolist(), 'conf': float(anns[i][6]), 'iscrowd': 0, 'area': float(anns[i][4] * anns[i][5])} out['annotations'].append(ann) image_cnt += num_images print(tid_curr, tid_last) print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations']))) json.dump(out, open(out_path, 'w')) ================================================ FILE: tools/convert_video.py ================================================ import cv2 def convert_video(video_path): cap = cv2.VideoCapture(video_path) width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float fps = cap.get(cv2.CAP_PROP_FPS) video_name = video_path.split('/')[-1].split('.')[0] save_name = video_name + '_converted' save_path = video_path.replace(video_name, save_name) vid_writer = cv2.VideoWriter( save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height)) ) while True: ret_val, frame = cap.read() if ret_val: vid_writer.write(frame) ch = cv2.waitKey(1) if ch == 27 or ch == ord("q") or ch == ord("Q"): break else: break if __name__ == "__main__": video_path = 'videos/palace.mp4' convert_video(video_path) ================================================ FILE: tools/mix_data_ablation.py ================================================ import json import os """ cd datasets mkdir -p mix_mot_ch/annotations cp mot/annotations/val_half.json mix_mot_ch/annotations/val_half.json cp mot/annotations/test.json mix_mot_ch/annotations/test.json cd mix_mot_ch ln -s ../mot/train mot_train ln -s ../crowdhuman/CrowdHuman_train crowdhuman_train ln -s ../crowdhuman/CrowdHuman_val crowdhuman_val cd .. """ mot_json = json.load(open('datasets/mot/annotations/train_half.json','r')) img_list = list() for img in mot_json['images']: img['file_name'] = 'mot_train/' + img['file_name'] img_list.append(img) ann_list = list() for ann in mot_json['annotations']: ann_list.append(ann) video_list = mot_json['videos'] category_list = mot_json['categories'] print('mot17') max_img = 10000 max_ann = 2000000 max_video = 10 crowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r')) img_id_count = 0 for img in crowdhuman_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_train/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_train' }) print('crowdhuman_train') max_img = 30000 max_ann = 10000000 crowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r')) img_id_count = 0 for img in crowdhuman_val_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_val/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_val_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_val' }) print('crowdhuman_val') mix_json = dict() mix_json['images'] = img_list mix_json['annotations'] = ann_list mix_json['videos'] = video_list mix_json['categories'] = category_list json.dump(mix_json, open('datasets/mix_mot_ch/annotations/train.json','w')) ================================================ FILE: tools/mix_data_bdd100k.py ================================================ import json import os import numpy as np """ cd datasets mkdir -p mix_det/annotations cp mot/annotations/val_half.json mix_det/annotations/val_half.json cp mot/annotations/test.json mix_det/annotations/test.json cd mix_det ln -s ../mot/train mot_train ln -s ../crowdhuman/CrowdHuman_train crowdhuman_train ln -s ../crowdhuman/CrowdHuman_val crowdhuman_val ln -s ../Cityscapes cp_train ln -s ../ETHZ ethz_train cd .. """ bdd100ktrain_json = json.load(open('datasets/bdd100k/annotations/mix_train_val.json','r')) # need_index=np.random.choice(range(len(bdd100ktrain_json['images'])),len(bdd100ktrain_json['images'])//3,replace=False) # need_img_ids={} img_list = list() for img in bdd100ktrain_json['images']: img['is_video']=1 img_list.append(img) # need_img_ids[bdd100ktrain_json['images'][img_idx]['id']]=1 ann_list = list() for ann in bdd100ktrain_json['annotations']: # if ann['image_id'] in need_img_ids: ann_list.append(ann) video_list = bdd100ktrain_json['videos'] category_list = bdd100ktrain_json['categories'] print('bdd100ktrain') max_img = len(img_list) max_ann = len(ann_list) max_video = len(video_list) bdd100kval_json = json.load(open('datasets/bdd100k/annotations/val.json','r')) for img in bdd100kval_json['images']: img['prev_image_id'] = img['prev_image_id'] + max_img img['next_image_id'] = img['next_image_id'] + max_img img['id'] = img['id'] + max_img img['video_id']+= max_video img['is_video']=1 img_list.append(img) for ann in bdd100kval_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) for vid in bdd100kval_json['videos']: vid['id']+=max_video video_list.append(vid) print('bdd100ktest') mix_json = dict() mix_json['images'] = img_list mix_json['annotations'] = ann_list mix_json['videos'] = video_list mix_json['categories'] = category_list json.dump(mix_json, open('datasets/bdd100k/annotations/mix_train_val.json','w')) ================================================ FILE: tools/mix_data_test_mot17.py ================================================ import json import os """ cd datasets mkdir -p mix_det/annotations cp mot/annotations/val_half.json mix_det/annotations/val_half.json cp mot/annotations/test.json mix_det/annotations/test.json cd mix_det ln -s ../mot/train mot_train ln -s ../crowdhuman/CrowdHuman_train crowdhuman_train ln -s ../crowdhuman/CrowdHuman_val crowdhuman_val ln -s ../Cityscapes cp_train ln -s ../ETHZ ethz_train cd .. """ mot_json = json.load(open('datasets/mot/annotations/train.json','r')) img_list = list() for img in mot_json['images']: img['file_name'] = 'mot_train/' + img['file_name'] img_list.append(img) ann_list = list() for ann in mot_json['annotations']: ann_list.append(ann) video_list = mot_json['videos'] category_list = mot_json['categories'] print('mot17') max_img = 10000 max_ann = 2000000 max_video = 10 crowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r')) img_id_count = 0 for img in crowdhuman_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_train/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) print('crowdhuman_train') video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_train' }) max_img = 30000 max_ann = 10000000 crowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r')) img_id_count = 0 for img in crowdhuman_val_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_val/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_val_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) print('crowdhuman_val') video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_val' }) max_img = 40000 max_ann = 20000000 ethz_json = json.load(open('datasets/ETHZ/annotations/train.json','r')) img_id_count = 0 for img in ethz_json['images']: img_id_count += 1 img['file_name'] = 'ethz_train/' + img['file_name'][5:] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in ethz_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) print('ETHZ') video_list.append({ 'id': max_video, 'file_name': 'ethz' }) max_img = 50000 max_ann = 25000000 cp_json = json.load(open('datasets/Cityscapes/annotations/train.json','r')) img_id_count = 0 for img in cp_json['images']: img_id_count += 1 img['file_name'] = 'cp_train/' + img['file_name'][11:] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in cp_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) print('Cityscapes') video_list.append({ 'id': max_video, 'file_name': 'cityperson' }) mix_json = dict() mix_json['images'] = img_list mix_json['annotations'] = ann_list mix_json['videos'] = video_list mix_json['categories'] = category_list json.dump(mix_json, open('datasets/mix_det/annotations/train.json','w')) ================================================ FILE: tools/mix_data_test_mot20.py ================================================ import json import os """ cd datasets mkdir -p mix_mot20_ch/annotations cp MOT20/annotations/val_half.json mix_mot20_ch/annotations/val_half.json cp MOT20/annotations/test.json mix_mot20_ch/annotations/test.json cd mix_mot20_ch ln -s ../MOT20/train mot20_train ln -s ../crowdhuman/CrowdHuman_train crowdhuman_train ln -s ../crowdhuman/CrowdHuman_val crowdhuman_val cd .. """ mot_json = json.load(open('datasets/MOT20/annotations/train.json','r')) img_list = list() for img in mot_json['images']: img['file_name'] = 'mot20_train/' + img['file_name'] img_list.append(img) ann_list = list() for ann in mot_json['annotations']: ann_list.append(ann) video_list = mot_json['videos'] category_list = mot_json['categories'] max_img = 10000 max_ann = 2000000 max_video = 10 crowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r')) img_id_count = 0 for img in crowdhuman_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_train/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_train' }) max_img = 30000 max_ann = 10000000 crowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r')) img_id_count = 0 for img in crowdhuman_val_json['images']: img_id_count += 1 img['file_name'] = 'crowdhuman_val/Images/' + img['file_name'] img['frame_id'] = img_id_count img['prev_image_id'] = img['id'] + max_img img['next_image_id'] = img['id'] + max_img img['id'] = img['id'] + max_img img['video_id'] = max_video img_list.append(img) for ann in crowdhuman_val_json['annotations']: ann['id'] = ann['id'] + max_ann ann['image_id'] = ann['image_id'] + max_img ann_list.append(ann) video_list.append({ 'id': max_video, 'file_name': 'crowdhuman_val' }) mix_json = dict() mix_json['images'] = img_list mix_json['annotations'] = ann_list mix_json['videos'] = video_list mix_json['categories'] = category_list json.dump(mix_json, open('datasets/mix_mot20_ch/annotations/train.json','w')) ================================================ FILE: tools/mota.py ================================================ from loguru import logger import numpy as np np.float = float np.int = int np.object = object np.bool = bool import torch import torch.backends.cudnn as cudnn from torch.nn.parallel import DistributedDataParallel as DDP import sys import os prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) from yolox.core import launch from yolox.exp import get_exp from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger import argparse import os import random import warnings import glob import motmetrics as mm from collections import OrderedDict from pathlib import Path def compare_dataframes(gts, ts): accs = [] names = [] for k, tsacc in ts.items(): if k in gts: logger.info('Comparing {}...'.format(k)) accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5)) names.append(k) else: logger.warning('No ground truth for {}, skipping.'.format(k)) return accs, names # evaluate MOTA results_folder = 'DiffusionTrack_outputs/yolox_x_diffusion_track_mot17_ablation/track_results_mot17_ablation_1_500' mm.lap.default_solver = 'lap' gt_type = '_val_half' #gt_type = '' print('gt_type', gt_type) gtfiles = glob.glob( os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type))) print('gt_files', gtfiles) tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')] logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles))) logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers)) logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver)) logger.info('Loading files.') gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles]) ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1.0)) for f in tsfiles]) mh = mm.metrics.create() accs, names = compare_dataframes(gt, ts) logger.info('Running metrics') metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects'] summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True) # print(mm.io.render_summary( # summary, formatters=mh.formatters, # namemap=mm.io.motchallenge_metric_names)) div_dict = { 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']} for divisor in div_dict: for divided in div_dict[divisor]: summary[divided] = (summary[divided] / summary[divisor]) fmt = mh.formatters change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', 'partially_tracked', 'mostly_lost'] for k in change_fmt_list: fmt[k] = fmt['mota'] print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names)) metrics = mm.metrics.motchallenge_metrics + ['num_objects'] summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)) logger.info('Completed') ================================================ FILE: tools/track.py ================================================ from loguru import logger import numpy as np np.float = float np.int = int np.object = object np.bool = bool import sys import os prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) import torch import torch.backends.cudnn as cudnn from torch.nn.parallel import DistributedDataParallel as DDP from yolox.core import launch from yolox.exp import get_exp from yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger from yolox.evaluators import DiffusionMOTEvaluatorKL import argparse import os import random import warnings import glob import motmetrics as mm from collections import OrderedDict from pathlib import Path def make_parser(): parser = argparse.ArgumentParser("YOLOX Eval") parser.add_argument("-expn", "--experiment-name", type=str, default=None) parser.add_argument("-n", "--name", type=str, default=None, help="model name") # distributed parser.add_argument( "--dist-backend", default="nccl", type=str, help="distributed backend" ) parser.add_argument( "--dist-url", default=None, type=str, help="url used to set up distributed training", ) parser.add_argument("-b", "--batch-size", type=int, default=1, help="batch size") parser.add_argument( "-d", "--devices", default=1, type=int, help="device for training" ) parser.add_argument( "--local_rank", default=0, type=int, help="local rank for dist training" ) parser.add_argument( "--num_machines", default=1, type=int, help="num of node for training" ) parser.add_argument( "--machine_rank", default=0, type=int, help="node rank for multi-node training" ) parser.add_argument( "-f", "--exp_file", default="exps/example/mot/yolox_x_diffusion_track_dancetrack.py", type=str, help="pls input your expriment description file", ) parser.add_argument( "--fp16", dest="fp16", default=False, action="store_true", help="Adopting mix precision evaluating.", ) parser.add_argument( "--fuse", dest="fuse", default=True, action="store_true", help="Fuse conv and bn for testing.", ) parser.add_argument( "--trt", dest="trt", default=False, action="store_true", help="Using TensorRT model for testing.", ) parser.add_argument( "--test", dest="test", default=False, action="store_true", help="Evaluating on test-dev set.", ) parser.add_argument( "--speed", dest="speed", default=False, action="store_true", help="speed test only.", ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) parser.add_argument("-c", "--ckpt", default="diffusiontrack_dancetrack.pth.tar", type=str, help="ckpt for eval") parser.add_argument("--tsize", default=None, type=int, help="test img size") parser.add_argument("--seed", default=8823, type=int, help="eval seed") # det args parser.add_argument("--det_thresh", default=0.7, type=float, help="detection conf") parser.add_argument("--nms2d", default=0.75, type=float, help="detection nms threshold") # tracking args parser.add_argument("--conf_thresh", type=float, default=0.25, help="tracking confidence threshold") parser.add_argument("--nms3d", default=0.7, type=float, help="association nms threshold") parser.add_argument("--interval", default=5, type=int, help="relink interval") parser.add_argument("--min-box-area", type=float, default=100, help='filter out tiny boxes') parser.add_argument("--mot20", dest="mot20", default=False, action="store_true", help="test mot20.") return parser def compare_dataframes(gts, ts): accs = [] names = [] for k, tsacc in ts.items(): if k in gts: logger.info('Comparing {}...'.format(k)) accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5)) names.append(k) else: logger.warning('No ground truth for {}, skipping.'.format(k)) return accs, names @logger.catch def main(exp, args, num_gpu): if args.seed is not None: random.seed(args.seed) torch.manual_seed(args.seed) cudnn.deterministic = True warnings.warn( "You have chosen to seed testing. This will turn on the CUDNN deterministic setting, " ) is_distributed = num_gpu > 1 # set environment variables for distributed training cudnn.benchmark = True rank = args.local_rank # rank = get_local_rank() file_name = os.path.join(exp.output_dir, args.experiment_name) if rank == 0: os.makedirs(file_name, exist_ok=True) results_folder = os.path.join(file_name, "track_results_mot20_test") os.makedirs(results_folder, exist_ok=True) setup_logger(file_name, distributed_rank=rank, filename="val_log.txt", mode="a") logger.info("Args: {}".format(args)) if args.conf_thresh is not None: exp.conf_thresh = args.conf_thresh if args.nms2d is not None: exp.nms_thresh2d = args.nms2d if args.det_thresh is not None: exp.det_thresh = args.det_thresh if args.nms3d is not None: exp.nms_thresh3d = args.nms3d if args.interval is not None: exp.interval=args.interval if args.tsize is not None: exp.test_size = (args.tsize, args.tsize) model = exp.get_model() # logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size))) #logger.info("Model Structure:\n{}".format(str(model))) val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test) evaluator = DiffusionMOTEvaluatorKL( args=args, dataloader=val_loader, img_size=exp.test_size, confthre=exp.conf_thresh, nmsthre3d=exp.nms_thresh3d, detthre=exp.det_thresh, nmsthre2d=exp.nms_thresh2d, interval=exp.interval, num_classes=exp.num_classes, ) torch.cuda.set_device(rank) model.cuda(rank) model.eval() if not args.speed and not args.trt: if args.ckpt is None: ckpt_file = os.path.join(file_name, "best_ckpt.pth.tar") else: ckpt_file = args.ckpt logger.info("loading checkpoint") loc = "cuda:{}".format(rank) ckpt = torch.load(ckpt_file, map_location=loc) # load the model state dict model.load_state_dict(ckpt["model"]) logger.info("loaded checkpoint done.") if is_distributed: model = DDP(model, device_ids=[rank]) if args.fuse: logger.info("\tFusing model...") model = fuse_model(model) if args.trt: assert ( not args.fuse and not is_distributed and args.batch_size == 1 ), "TensorRT model is not support model fusing and distributed inferencing!" trt_file = os.path.join(file_name, "model_trt.pth") assert os.path.exists( trt_file ), "TensorRT model is not found!\n Run tools/trt.py first!" model.head.decode_in_inference = False decoder = model.head.decode_outputs else: trt_file = None decoder = None # start evaluate *_, summary = evaluator.evaluate( model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder ) logger.info("\n" + summary) # evaluate MOTA mm.lap.default_solver = 'lap' if exp.val_ann == 'val_half.json': gt_type = '_val_half' else: gt_type = '' print('gt_type', gt_type) if args.mot20: gtfiles = glob.glob(os.path.join('datasets/MOT20/train', '*/gt/gt{}.txt'.format(gt_type))) else: gtfiles = glob.glob(os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type))) print('gt_files', gtfiles) tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')] logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles))) logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers)) logger.info('Default LAP solver \'{}\''.format(mm.lap.default_solver)) logger.info('Loading files.') gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles]) ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles]) mh = mm.metrics.create() accs, names = compare_dataframes(gt, ts) logger.info('Running metrics') metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked', 'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects'] summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True) # print(mm.io.render_summary( # summary, formatters=mh.formatters, # namemap=mm.io.motchallenge_metric_names)) div_dict = { 'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'], 'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']} for divisor in div_dict: for divided in div_dict[divisor]: summary[divided] = (summary[divided] / summary[divisor]) fmt = mh.formatters change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked', 'partially_tracked', 'mostly_lost'] for k in change_fmt_list: fmt[k] = fmt['mota'] print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names)) metrics = mm.metrics.motchallenge_metrics + ['num_objects'] summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True) print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names)) logger.info('Completed') if __name__ == "__main__": args = make_parser().parse_args() exp = get_exp(args.exp_file, args.name) exp.merge(args.opts) if not args.experiment_name: args.experiment_name = exp.exp_name num_gpu = torch.cuda.device_count() if args.devices is None else args.devices assert num_gpu <= torch.cuda.device_count() launch( main, num_gpu, args.num_machines, args.machine_rank, backend=args.dist_backend, dist_url=args.dist_url, args=(exp, args, num_gpu), ) ================================================ FILE: tools/train.py ================================================ from loguru import logger import numpy as np np.float = float np.int = int np.object = object np.bool = bool import torch import torch.backends.cudnn as cudnn import os # os.environ["CUDA_VISIBLE_DEVICES"]="2,3,4,5,6,7" import sys prj_path = os.path.join(os.path.dirname(__file__), '..') if prj_path not in sys.path: sys.path.append(prj_path) from yolox.core import Trainer, launch from yolox.exp import get_exp import argparse import random import warnings def make_parser(): parser = argparse.ArgumentParser("YOLOX train parser") parser.add_argument("-expn", "--experiment-name", type=str, default=None) parser.add_argument("-n", "--name", type=str, default=None, help="model name") # distributed parser.add_argument( "--dist-backend", default="nccl", type=str, help="distributed backend" ) parser.add_argument( "--dist-url", default=None, type=str, help="url used to set up distributed training", ) parser.add_argument("-b", "--batch-size", type=int, default=2*8, help="batch size") parser.add_argument( "-d", "--devices", default=8, type=int, help="device for training" ) parser.add_argument( "--local_rank", default=0, type=int, help="local rank for dist training" ) parser.add_argument( "-f", "--exp_file", default="exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py", type=str, help="plz input your expriment description file", ) parser.add_argument( "--resume", default=False, action="store_true", help="resume training" ) parser.add_argument("-c", "--ckpt", default="diffusion_dancetrack_det.pth.tar", type=str, help="checkpoint file") parser.add_argument( "-e", "--start_epoch", default=None, type=int, help="resume training start epoch", ) parser.add_argument( "--num_machines", default=1, type=int, help="num of node for training" ) parser.add_argument( "--machine_rank", default=0, type=int, help="node rank for multi-node training" ) parser.add_argument( "--fp16", dest="fp16", default=False, action="store_true", help="Adopting mix precision training.", ) parser.add_argument( "-o", "--occupy", dest="occupy", default=False, action="store_true", help="occupy GPU memory first for training.", ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser @logger.catch def main(exp, args): if exp.seed is not None: random.seed(exp.seed) torch.manual_seed(exp.seed) cudnn.deterministic = True warnings.warn( "You have chosen to seed training. This will turn on the CUDNN deterministic setting, " "which can slow down your training considerably! You may see unexpected behavior " "when restarting from checkpoints." ) # set environment variables for distributed training cudnn.benchmark = True trainer = Trainer(exp, args) trainer.train() if __name__ == "__main__": args = make_parser().parse_args() # args.exp_file=f # args.ckpt=c exp = get_exp(args.exp_file, args.name) exp.merge(args.opts) if not args.experiment_name: args.experiment_name = exp.exp_name num_gpu = torch.cuda.device_count() if args.devices is None else args.devices assert num_gpu <= torch.cuda.device_count() launch( main, num_gpu, args.num_machines, args.machine_rank, backend=args.dist_backend, dist_url=args.dist_url, args=(exp, args), ) ================================================ FILE: tools/txt2video.py ================================================ import os import sys import json import cv2 import glob as gb import numpy as np def colormap(rgb=False): color_list = np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000 ] ).astype(np.float32) color_list = color_list.reshape((-1, 3)) * 255 if not rgb: color_list = color_list[:, ::-1] return color_list def txt2img(visual_path="visual_val_gt"): print("Starting txt2img") valid_labels = {1} ignore_labels = {2, 7, 8, 12} if not os.path.exists(visual_path): os.makedirs(visual_path) color_list = colormap() gt_json_path = 'datasets/mot/annotations/val_half.json' img_path = 'datasets/mot/train/' show_video_names = ['MOT17-02-FRCNN', 'MOT17-04-FRCNN', 'MOT17-05-FRCNN', 'MOT17-09-FRCNN', 'MOT17-10-FRCNN', 'MOT17-11-FRCNN', 'MOT17-13-FRCNN'] test_json_path = 'datasets/mot/annotations/test.json' test_img_path = 'datasets/mot/test/' test_show_video_names = ['MOT17-01-FRCNN', 'MOT17-03-FRCNN', 'MOT17-06-FRCNN', 'MOT17-07-FRCNN', 'MOT17-08-FRCNN', 'MOT17-12-FRCNN', 'MOT17-14-FRCNN'] if visual_path == "visual_test_predict": show_video_names = test_show_video_names img_path = test_img_path gt_json_path = test_json_path for show_video_name in show_video_names: img_dict = dict() if visual_path == "visual_val_gt": txt_path = 'datasets/mot/train/' + show_video_name + '/gt/gt_val_half.txt' elif visual_path == "visual_yolox_x": txt_path = 'YOLOX_outputs/yolox_mot_x_1088/track_results/'+ show_video_name + '.txt' elif visual_path == "visual_test_predict": txt_path = 'test/tracks/'+ show_video_name + '.txt' else: raise NotImplementedError with open(gt_json_path, 'r') as f: gt_json = json.load(f) for ann in gt_json["images"]: file_name = ann['file_name'] video_name = file_name.split('/')[0] if video_name == show_video_name: img_dict[ann['frame_id']] = img_path + file_name txt_dict = dict() with open(txt_path, 'r') as f: for line in f.readlines(): linelist = line.split(',') mark = int(float(linelist[6])) label = int(float(linelist[7])) vis_ratio = float(linelist[8]) if visual_path == "visual_val_gt": if mark == 0 or label not in valid_labels or label in ignore_labels or vis_ratio <= 0: continue img_id = linelist[0] obj_id = linelist[1] bbox = [float(linelist[2]), float(linelist[3]), float(linelist[2]) + float(linelist[4]), float(linelist[3]) + float(linelist[5]), int(obj_id)] if int(img_id) in txt_dict: txt_dict[int(img_id)].append(bbox) else: txt_dict[int(img_id)] = list() txt_dict[int(img_id)].append(bbox) for img_id in sorted(txt_dict.keys()): img = cv2.imread(img_dict[img_id]) for bbox in txt_dict[img_id]: cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color_list[bbox[4]%79].tolist(), thickness=2) cv2.putText(img, "{}".format(int(bbox[4])), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color_list[bbox[4]%79].tolist(), 2) cv2.imwrite(visual_path + "/" + show_video_name + "{:0>6d}.png".format(img_id), img) print(show_video_name, "Done") print("txt2img Done") def img2video(visual_path="visual_val_gt"): print("Starting img2video") img_paths = gb.glob(visual_path + "/*.png") fps = 16 size = (1920,1080) videowriter = cv2.VideoWriter(visual_path + "_video.avi",cv2.VideoWriter_fourcc('M','J','P','G'), fps, size) for img_path in sorted(img_paths): img = cv2.imread(img_path) img = cv2.resize(img, size) videowriter.write(img) videowriter.release() print("img2video Done") if __name__ == '__main__': visual_path="visual_yolox_x" if len(sys.argv) > 1: visual_path =sys.argv[1] txt2img(visual_path) #img2video(visual_path) ================================================ FILE: yolox/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- from .utils import configure_module configure_module() __version__ = "0.1.0" ================================================ FILE: yolox/core/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. from .launch import launch from .trainer import Trainer ================================================ FILE: yolox/core/launch.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Code are based on # https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) Megvii, Inc. and its affiliates. from loguru import logger import torch import torch.distributed as dist import torch.multiprocessing as mp import yolox.utils.dist as comm from yolox.utils import configure_nccl import os import subprocess import sys import time __all__ = ["launch"] def _find_free_port(): """ Find an available port of current machine / node. """ import socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Binding to port 0 will cause the OS to find an available port for us sock.bind(("", 0)) port = sock.getsockname()[1] sock.close() # NOTE: there is still a chance the port could be taken by other processes. return port def launch( main_func, num_gpus_per_machine, num_machines=1, machine_rank=0, backend="nccl", dist_url=None, args=(), ): """ Args: main_func: a function that will be called by `main_func(*args)` num_machines (int): the total number of machines machine_rank (int): the rank of this machine (one per machine) dist_url (str): url to connect to for distributed training, including protocol e.g. "tcp://127.0.0.1:8686". Can be set to auto to automatically select a free port on localhost args (tuple): arguments passed to main_func """ world_size = num_machines * num_gpus_per_machine if world_size > 1: if int(os.environ.get("WORLD_SIZE", "1")) > 1: dist_url = "{}:{}".format( os.environ.get("MASTER_ADDR", None), os.environ.get("MASTER_PORT", "None"), ) local_rank = int(os.environ.get("LOCAL_RANK", "0")) world_size = int(os.environ.get("WORLD_SIZE", "1")) _distributed_worker( local_rank, main_func, world_size, num_gpus_per_machine, num_machines, machine_rank, backend, dist_url, args, ) exit() launch_by_subprocess( sys.argv, world_size, num_machines, machine_rank, num_gpus_per_machine, dist_url, args, ) else: main_func(*args) def launch_by_subprocess( raw_argv, world_size, num_machines, machine_rank, num_gpus_per_machine, dist_url, args, ): assert ( world_size > 1 ), "subprocess mode doesn't support single GPU, use spawn mode instead" if dist_url is None: # ------------------------hack for multi-machine training -------------------- # if num_machines > 1: master_ip = subprocess.check_output(["hostname", "--fqdn"]).decode("utf-8") master_ip = str(master_ip).strip() dist_url = "tcp://{}".format(master_ip) ip_add_file = "./" + args[1].experiment_name + "_ip_add.txt" if machine_rank == 0: port = _find_free_port() with open(ip_add_file, "w") as ip_add: ip_add.write(dist_url+'\n') ip_add.write(str(port)) else: while not os.path.exists(ip_add_file): time.sleep(0.5) with open(ip_add_file, "r") as ip_add: dist_url = ip_add.readline().strip() port = ip_add.readline() else: dist_url = "tcp://127.0.0.1" port = _find_free_port() # set PyTorch distributed related environmental variables current_env = os.environ.copy() current_env["MASTER_ADDR"] = dist_url current_env["MASTER_PORT"] = str(port) current_env["WORLD_SIZE"] = str(world_size) assert num_gpus_per_machine <= torch.cuda.device_count() if "OMP_NUM_THREADS" not in os.environ and num_gpus_per_machine > 1: current_env["OMP_NUM_THREADS"] = str(1) logger.info( "\n*****************************************\n" "Setting OMP_NUM_THREADS environment variable for each process " "to be {} in default, to avoid your system being overloaded, " "please further tune the variable for optimal performance in " "your application as needed. \n" "*****************************************".format( current_env["OMP_NUM_THREADS"] ) ) processes = [] for local_rank in range(0, num_gpus_per_machine): # each process's rank dist_rank = machine_rank * num_gpus_per_machine + local_rank current_env["RANK"] = str(dist_rank) current_env["LOCAL_RANK"] = str(local_rank) # spawn the processes cmd = ["python3", *raw_argv] process = subprocess.Popen(cmd, env=current_env) processes.append(process) for process in processes: process.wait() if process.returncode != 0: raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd) def _distributed_worker( local_rank, main_func, world_size, num_gpus_per_machine, num_machines, machine_rank, backend, dist_url, args, ): assert ( torch.cuda.is_available() ), "cuda is not available. Please check your installation." configure_nccl() global_rank = machine_rank * num_gpus_per_machine + local_rank logger.info("Rank {} initialization finished.".format(global_rank)) try: dist.init_process_group( backend=backend, init_method=dist_url, world_size=world_size, rank=global_rank, ) except Exception: logger.error("Process group URL: {}".format(dist_url)) raise # synchronize is needed here to prevent a possible timeout after calling init_process_group # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 comm.synchronize() if global_rank == 0 and os.path.exists( "./" + args[1].experiment_name + "_ip_add.txt" ): os.remove("./" + args[1].experiment_name + "_ip_add.txt") assert num_gpus_per_machine <= torch.cuda.device_count() torch.cuda.set_device(local_rank) args[1].local_rank = local_rank args[1].num_machines = num_machines # Setup the local process group (which contains ranks within the same machine) # assert comm._LOCAL_PROCESS_GROUP is None # num_machines = world_size // num_gpus_per_machine # for i in range(num_machines): # ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine)) # pg = dist.new_group(ranks_on_i) # if i == machine_rank: # comm._LOCAL_PROCESS_GROUP = pg main_func(*args) ================================================ FILE: yolox/core/trainer.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. from loguru import logger import torch from torch.nn.parallel import DistributedDataParallel as DDP from tensorboardX import SummaryWriter from yolox.data import DataPrefetcher from yolox.utils import ( MeterBuffer, ModelEMA, all_reduce_norm, get_model_info, get_rank, get_world_size, gpu_mem_usage, load_ckpt, occupy_mem, save_checkpoint, setup_logger, synchronize, ) import datetime import os import time class Trainer: def __init__(self, exp, args): # init function only defines some basic attr, other attrs like model, optimizer are built in # before_train methods. self.exp = exp self.args = args # training related attr self.task=exp.task self.max_epoch = exp.max_epoch self.amp_training = args.fp16 self.scaler = torch.cuda.amp.GradScaler(enabled=args.fp16) self.is_distributed = get_world_size() > 1 self.rank = get_rank() self.local_rank = args.local_rank self.device = "cuda:{}".format(self.local_rank) self.use_model_ema = exp.ema # data/dataloader related attr self.data_type = torch.float16 if args.fp16 else torch.float32 self.input_size = exp.input_size self.random_flip=exp.random_flip self.best_ap = 0 # metric record self.meter = MeterBuffer(window_size=exp.print_interval) self.file_name = os.path.join(exp.output_dir, args.experiment_name) if self.rank == 0: os.makedirs(self.file_name, exist_ok=True) setup_logger( self.file_name, distributed_rank=self.rank, filename="train_log.txt", mode="a", ) def train(self): self.before_train() try: self.train_in_epoch() except Exception: raise finally: self.after_train() def train_in_epoch(self): for self.epoch in range(self.start_epoch, self.max_epoch): self.before_epoch() self.train_in_iter() self.after_epoch() def train_in_iter(self): for self.iter in range(self.max_iter): self.before_iter() self.train_one_iter() self.after_iter() def train_one_iter(self): iter_start_time = time.time() pre_inps, pre_targets,cur_inps,cur_targets= self.prefetcher.next() pre_inps = pre_inps.to(self.data_type) pre_targets = pre_targets[:,:,:5].to(self.data_type) pre_targets.requires_grad = False if self.task=="tracking": cur_inps = cur_inps.to(self.data_type) cur_targets = cur_targets[:,:,:5].to(self.data_type) cur_targets.requires_grad = False data_end_time = time.time() inps,targets=(pre_inps,cur_inps),(pre_targets,cur_targets) with torch.cuda.amp.autocast(enabled=self.amp_training): outputs = self.model(inps,targets,self.random_flip,self.input_size) loss = outputs["total_loss"] self.optimizer.zero_grad() self.scaler.scale(loss).backward() self.scaler.step(self.optimizer) self.scaler.update() if self.use_model_ema: self.ema_model.update(self.model) lr = self.lr_scheduler.update_lr(self.progress_in_iter + 1) for param_group in self.optimizer.param_groups: param_group["lr"] = lr iter_end_time = time.time() self.meter.update( iter_time=iter_end_time - iter_start_time, data_time=data_end_time - iter_start_time, lr=lr, **outputs, ) def before_train(self): logger.info("args: {}".format(self.args)) logger.info("exp value:\n{}".format(self.exp)) # model related init torch.cuda.set_device(self.local_rank) model = self.exp.get_model() # logger.info( # "Model Summary: {}".format(get_model_info(model, self.exp.test_size)) # ) model.to(self.device) # solver related init self.optimizer = self.exp.get_optimizer(self.args.batch_size) # value of epoch will be set in `resume_train` model = self.resume_train(model) # data related init self.no_aug = self.start_epoch >= self.max_epoch - self.exp.no_aug_epochs self.train_loader = self.exp.get_data_loader( batch_size=self.args.batch_size, is_distributed=self.is_distributed, no_aug=self.no_aug, ) logger.info("init prefetcher, this might take one minute or less...") self.prefetcher = DataPrefetcher(self.train_loader,self.task) # max_iter means iters per epoch self.max_iter = len(self.train_loader) self.lr_scheduler = self.exp.get_lr_scheduler( self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter ) if self.args.occupy: occupy_mem(self.local_rank) if self.is_distributed: model = DDP(model, device_ids=[self.local_rank], broadcast_buffers=False,find_unused_parameters=False) if self.use_model_ema: self.ema_model = ModelEMA(model, 0.9998) self.ema_model.updates = self.max_iter * self.start_epoch self.model = model self.model.train() self.evaluator = self.exp.get_evaluator( batch_size=self.args.batch_size, is_distributed=self.is_distributed ) # Tensorboard logger if self.rank == 0: self.tblogger = SummaryWriter(self.file_name) logger.info("Training start...") #logger.info("\n{}".format(model)) def after_train(self): logger.info( "Training of experiment is done and the best AP is {:.2f}".format( self.best_ap * 100 ) ) def before_epoch(self): logger.info("---> start train epoch{}".format(self.epoch + 1)) if self.epoch + 1 == self.max_epoch - self.exp.no_aug_epochs or self.no_aug: logger.info("--->No mosaic aug now!") self.train_loader.close_mosaic() logger.info("--->Add additional L1 loss now!") if self.is_distributed: self.model.module.head.use_l1 = True else: self.model.head.use_l1 = True self.exp.eval_interval = 1 if not self.no_aug: self.save_ckpt(ckpt_name="last_mosaic_epoch") def after_epoch(self): if self.use_model_ema: self.ema_model.update_attr(self.model) self.save_ckpt(ckpt_name="latest") if (self.epoch + 1) % 10 == 0: self.save_ckpt(ckpt_name="epoch_{}".format(self.epoch+1)) if (self.epoch + 1) % self.exp.eval_interval == 0: all_reduce_norm(self.model) self.evaluate_and_save_model() def before_iter(self): pass def after_iter(self): """ `after_iter` contains two parts of logic: * log information * reset setting of resize """ # log needed information # (self.iter + 1) % self.exp.print_interval == 0 and if (self.iter + 1) % self.exp.print_interval == 0: # TODO check ETA logic left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1) eta_seconds = self.meter["iter_time"].global_avg * left_iters eta_str = "ETA: {}".format(datetime.timedelta(seconds=int(eta_seconds))) progress_str = "epoch: {}/{}, iter: {}/{}".format( self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter ) loss_meter = self.meter.get_filtered_meter("loss") loss_str = ", ".join( ["{}: {:.3f}".format(k, v.latest) for k, v in loss_meter.items()] ) time_meter = self.meter.get_filtered_meter("time") time_str = ", ".join( ["{}: {:.3f}s".format(k, v.avg) for k, v in time_meter.items()] ) logger.info( "{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}".format( progress_str, gpu_mem_usage(), time_str, loss_str, self.meter["lr"].latest, ) + (", size: {:d}, {}".format(self.input_size[0], eta_str)) ) self.meter.clear_meters() # random resizing if self.exp.random_size is not None and (self.progress_in_iter + 1) % 10 == 0: self.input_size = self.exp.random_resize( self.train_loader, self.epoch, self.rank, self.is_distributed ) @property def progress_in_iter(self): return self.epoch * self.max_iter + self.iter def resume_train(self, model): if self.args.resume: logger.info("resume training") if self.args.ckpt is None: ckpt_file = os.path.join(self.file_name, "latest" + "_ckpt.pth.tar") else: ckpt_file = self.args.ckpt ckpt = torch.load(ckpt_file, map_location=self.device) # resume the model/optimizer state dict model.load_state_dict(ckpt["model"]) self.optimizer.load_state_dict(ckpt["optimizer"]) start_epoch = ( self.args.start_epoch - 1 if self.args.start_epoch is not None else ckpt["start_epoch"] ) self.start_epoch = start_epoch logger.info( "loaded checkpoint '{}' (epoch {})".format( self.args.resume, self.start_epoch ) ) # noqa else: if self.args.ckpt is not None: logger.info("loading checkpoint for fine tuning") ckpt_file = self.args.ckpt ckpt = torch.load(ckpt_file, map_location=self.device)["model"] model = load_ckpt(model, ckpt) self.start_epoch = 0 return model def evaluate_and_save_model(self): evalmodel = self.ema_model.ema if self.use_model_ema else self.model ap50_95, ap50, summary = self.exp.eval( evalmodel, self.evaluator, self.is_distributed ) self.model.train() if self.rank == 0: self.tblogger.add_scalar("val/COCOAP50", ap50, self.epoch + 1) self.tblogger.add_scalar("val/COCOAP50_95", ap50_95, self.epoch + 1) logger.info("\n" + summary) synchronize() self.best_ap = max(self.best_ap, ap50_95) self.save_ckpt("last_epoch", ap50 > self.best_ap) self.best_ap = max(self.best_ap, ap50) def save_ckpt(self, ckpt_name, update_best_ckpt=False): if self.rank == 0: save_model = self.ema_model.ema if self.use_model_ema else self.model logger.info("Save weights to {}".format(self.file_name)) ckpt_state = { "start_epoch": self.epoch + 1, "model": save_model.state_dict(), "optimizer": self.optimizer.state_dict(), } save_checkpoint( ckpt_state, update_best_ckpt, self.file_name, ckpt_name, ) ================================================ FILE: yolox/data/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. from .data_augment import TrainTransform, ValTransform,DiffusionValTransform,DiffusionTrainTransform from .data_prefetcher import DataPrefetcher from .dataloading import DataLoader, get_yolox_datadir from .datasets import * from .samplers import InfiniteSampler, YoloBatchSampler ================================================ FILE: yolox/data/data_augment.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. """ Data augmentation functionality. Passed as callable transformations to Dataset classes. The data augmentation procedures were interpreted from @weiliu89's SSD paper http://arxiv.org/abs/1512.02325 """ import cv2 import numpy as np import torch from yolox.utils import xyxy2cxcywh import math import random def augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4): r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) dtype = img.dtype # uint8 x = np.arange(0, 256, dtype=np.int16) lut_hue = ((x * r[0]) % 180).astype(dtype) lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) lut_val = np.clip(x * r[2], 0, 255).astype(dtype) img_hsv = cv2.merge( (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)) ).astype(dtype) cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2): # box1(4,n), box2(4,n) # Compute candidate boxes which include follwing 5 things: # box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio w1, h1 = box1[2] - box1[0], box1[3] - box1[1] w2, h2 = box2[2] - box2[0], box2[3] - box2[1] ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio return ( (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) ) # candidates def random_perspective( img, targets=(), degrees=10, translate=0.1, scale=0.1, shear=10, perspective=0.0, border=(0, 0), ): # targets = [cls, xyxy] height = img.shape[0] + border[0] * 2 # shape(h,w,c) width = img.shape[1] + border[1] * 2 # Center C = np.eye(3) C[0, 2] = -img.shape[1] / 2 # x translation (pixels) C[1, 2] = -img.shape[0] / 2 # y translation (pixels) # Rotation and Scale R = np.eye(3) a = random.uniform(-degrees, degrees) # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations s = random.uniform(scale[0], scale[1]) # s = 2 ** random.uniform(-scale, scale) R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) # Shear S = np.eye(3) S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) # Translation T = np.eye(3) T[0, 2] = ( random.uniform(0.5 - translate, 0.5 + translate) * width ) # x translation (pixels) T[1, 2] = ( random.uniform(0.5 - translate, 0.5 + translate) * height ) # y translation (pixels) # Combined rotation matrix M = T @ S @ R @ C # order of operations (right to left) is IMPORTANT ########################### # For Aug out of Mosaic # s = 1. # M = np.eye(3) ########################### if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed if perspective: img = cv2.warpPerspective( img, M, dsize=(width, height), borderValue=(114, 114, 114) ) else: # affine img = cv2.warpAffine( img, M[:2], dsize=(width, height), borderValue=(114, 114, 114) ) # Transform label coordinates n = len(targets) if n: # warp points xy = np.ones((n * 4, 3)) xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape( n * 4, 2 ) # x1y1, x2y2, x1y2, x2y1 xy = xy @ M.T # transform if perspective: xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale else: # affine xy = xy[:, :2].reshape(n, 8) # create new boxes x = xy[:, [0, 2, 4, 6]] y = xy[:, [1, 3, 5, 7]] xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T # clip boxes #xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width) #xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height) # filter candidates i = box_candidates(box1=targets[:, :4].T * s, box2=xy.T) targets = targets[i] targets[:, :4] = xy[i] targets = targets[targets[:, 0] < width] targets = targets[targets[:, 2] > 0] targets = targets[targets[:, 1] < height] targets = targets[targets[:, 3] > 0] return img, targets def _distort(image): def _convert(image, alpha=1, beta=0): tmp = image.astype(float) * alpha + beta tmp[tmp < 0] = 0 tmp[tmp > 255] = 255 image[:] = tmp image = image.copy() if random.randrange(2): _convert(image, beta=random.uniform(-32, 32)) if random.randrange(2): _convert(image, alpha=random.uniform(0.5, 1.5)) image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) if random.randrange(2): tmp = image[:, :, 0].astype(int) + random.randint(-18, 18) tmp %= 180 image[:, :, 0] = tmp if random.randrange(2): _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5)) image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) return image def _mirror(image, boxes): _, width, _ = image.shape if random.randrange(2): image = image[:, ::-1] boxes = boxes.copy() boxes[:, 0::2] = width - boxes[:, 2::-2] return image, boxes def preproc(image, input_size, mean, std, swap=(2, 0, 1)): if len(image.shape) == 3: padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0 else: padded_img = np.ones(input_size) * 114.0 img = np.array(image) r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1]) resized_img = cv2.resize( img, (int(img.shape[1] * r), int(img.shape[0] * r)), interpolation=cv2.INTER_LINEAR, ).astype(np.float32) padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img padded_img = padded_img[:, :, ::-1] padded_img /= 255.0 if mean is not None: padded_img -= mean if std is not None: padded_img /= std padded_img = padded_img.transpose(swap) padded_img = np.ascontiguousarray(padded_img, dtype=np.float32) return padded_img, r class TrainTransform: def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100): self.means = rgb_means self.std = std self.p = p self.max_labels = max_labels def __call__(self, image, targets, input_dim): boxes = targets[:, :4].copy() labels = targets[:, 4].copy() ids = targets[:, 5].copy() if len(boxes) == 0: targets = np.zeros((self.max_labels, 6), dtype=np.float32) image, r_o = preproc(image, input_dim, self.means, self.std) image = np.ascontiguousarray(image, dtype=np.float32) return image, targets image_o = image.copy() targets_o = targets.copy() height_o, width_o, _ = image_o.shape boxes_o = targets_o[:, :4] labels_o = targets_o[:, 4] ids_o = targets_o[:, 5] # bbox_o: [xyxy] to [c_x,c_y,w,h] boxes_o = xyxy2cxcywh(boxes_o) image_t = _distort(image) image_t, boxes = _mirror(image_t, boxes) height, width, _ = image_t.shape image_t, r_ = preproc(image_t, input_dim, self.means, self.std) # boxes [xyxy] 2 [cx,cy,w,h] boxes = xyxy2cxcywh(boxes) boxes *= r_ mask_b = np.minimum(boxes[:, 2], boxes[:, 3]) > 1 boxes_t = boxes[mask_b] labels_t = labels[mask_b] ids_t = ids[mask_b] if len(boxes_t) == 0: image_t, r_o = preproc(image_o, input_dim, self.means, self.std) boxes_o *= r_o boxes_t = boxes_o labels_t = labels_o ids_t = ids_o labels_t = np.expand_dims(labels_t, 1) ids_t = np.expand_dims(ids_t, 1) targets_t = np.hstack((labels_t, boxes_t, ids_t)) padded_labels = np.zeros((self.max_labels, 6)) padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[ : self.max_labels ] padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32) image_t = np.ascontiguousarray(image_t, dtype=np.float32) return image_t, padded_labels class DiffusionValTransform: def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100): self.means = rgb_means self.std = std self.p = p self.max_labels = max_labels def __call__(self, image, targets, input_dim): if len(targets) == 0: targets = np.zeros((self.max_labels, 6), dtype=np.float32) image, r_o = preproc(image, input_dim, self.means, self.std) image = np.ascontiguousarray(image, dtype=np.float32) return image, targets image_o = image.copy() targets_o = targets.copy() boxes_o = targets_o[:, :4] labels_o = targets_o[:, 4] ids_o = targets_o[:, 5] # bbox_o: [xyxy] to [c_x,c_y,w,h] boxes_o = xyxy2cxcywh(boxes_o) image_t, r_o = preproc(image_o, input_dim, self.means, self.std) boxes_o *= r_o boxes_t = boxes_o labels_t = labels_o ids_t = ids_o labels_t = np.expand_dims(labels_t, 1) ids_t = np.expand_dims(ids_t, 1) targets_t = np.hstack((labels_t, boxes_t, ids_t)) padded_labels = np.zeros((self.max_labels, 6)) padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[ : self.max_labels ] padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32) image_t = np.ascontiguousarray(image_t, dtype=np.float32) return image_t, padded_labels class DiffusionTrainTransform: def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100): self.means = rgb_means self.std = std self.p = p self.max_labels = max_labels def __call__(self, ref_image, ref_targets, track_image, track_targets,input_dim): if len(ref_targets) == 0: ref_targets_t = np.zeros((self.max_labels, 6), dtype=np.float32) ref_image_t, r_o = preproc(ref_image, input_dim, self.means, self.std) ref_image_t = np.ascontiguousarray(ref_image_t, dtype=np.float32) track_targets_t = np.zeros((self.max_labels, 6), dtype=np.float32) track_image_t, r_o = preproc(track_image, input_dim, self.means, self.std) track_image_t = np.ascontiguousarray(track_image_t, dtype=np.float32) return ref_image_t, ref_targets_t,track_image_t,track_targets_t ref_image_o = ref_image.copy() ref_targets_o = ref_targets.copy() ref_boxes_o = ref_targets_o[:, :4] ref_labels_o = ref_targets_o[:, 4] ref_ids_o = ref_targets_o[:, 5] # bbox_o: [xyxy] to [c_x,c_y,w,h] ref_boxes_o = xyxy2cxcywh(ref_boxes_o) ref_image_t, ref_r_o = preproc(ref_image_o, input_dim, self.means, self.std) ref_boxes_o *= ref_r_o ref_boxes_t = ref_boxes_o ref_labels_t = ref_labels_o ref_ids_t = ref_ids_o ref_labels_t = np.expand_dims(ref_labels_t, 1) ref_ids_t = np.expand_dims(ref_ids_t, 1) ref_targets_t = np.hstack((ref_labels_t, ref_boxes_t, ref_ids_t)) track_image_o = track_image.copy() track_targets_o = track_targets.copy() track_boxes_o = track_targets_o[:, :4] track_labels_o = track_targets_o[:, 4] track_ids_o = track_targets_o[:, 5] # bbox_o: [xyxy] to [c_x,c_y,w,h] track_boxes_o = xyxy2cxcywh(track_boxes_o) track_image_t, track_r_o = preproc(track_image_o, input_dim, self.means, self.std) track_boxes_o *= track_r_o track_boxes_t = track_boxes_o track_labels_t = track_labels_o track_ids_t = track_ids_o track_labels_t = np.expand_dims(track_labels_t, 1) track_ids_t = np.expand_dims(track_ids_t, 1) track_targets_t = np.hstack((track_labels_t, track_boxes_t, track_ids_t)) ref_padded_labels = np.zeros((self.max_labels, 6)) track_padded_labels = np.zeros((self.max_labels, 6)) pair_indices=np.argwhere((ref_targets_t[:,5].reshape(-1,1)==track_targets_t[:,5].reshape(1,-1))>0) ref_targets_t=ref_targets_t[pair_indices[:,0]] track_targets_t=track_targets_t[pair_indices[:,1]] ref_padded_labels[range(len(ref_targets_t))[: self.max_labels]] = ref_targets_t[ : self.max_labels ] ref_padded_labels = np.ascontiguousarray(ref_padded_labels, dtype=np.float32) ref_image_t = np.ascontiguousarray(ref_image_t, dtype=np.float32) track_padded_labels[range(len(track_targets_t))[: self.max_labels]] = track_targets_t[ : self.max_labels ] track_padded_labels = np.ascontiguousarray(track_padded_labels, dtype=np.float32) track_image_t = np.ascontiguousarray(track_image_t, dtype=np.float32) return ref_image_t,ref_padded_labels,track_image_t,track_padded_labels class ValTransform: """ Defines the transformations that should be applied to test PIL image for input into the network dimension -> tensorize -> color adj Arguments: resize (int): input dimension to SSD rgb_means ((int,int,int)): average RGB of the dataset (104,117,123) swap ((int,int,int)): final order of channels Returns: transform (transform) : callable transform to be applied to test/val data """ def __init__(self, rgb_means=None, std=None, swap=(2, 0, 1)): self.means = rgb_means self.swap = swap self.std = std # assume input is cv2 img for now def __call__(self, img, res, input_size): img, _ = preproc(img, input_size, self.means, self.std, self.swap) return img, np.zeros((1, 5)) ================================================ FILE: yolox/data/data_prefetcher.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import torch import torch.distributed as dist from yolox.utils import synchronize import random class DataPrefetcher: """ DataPrefetcher is inspired by code of following file: https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py It could speedup your pytorch dataloader. For more information, please check https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789. """ def __init__(self, loader,task): self.loader = iter(loader) self.task=task self.stream = torch.cuda.Stream() self.record_stream = DataPrefetcher._record_stream_for_image self.preload() def preload(self): try: if self.task=="tracking": self.next_input_pre, self.next_target_pre,self.next_input_cur, self.next_target_cur,_, _ = next(self.loader) else: self.next_input_pre, self.next_target_pre, _, _ = next(self.loader) except StopIteration: self.next_input_pre = None self.next_target_pre = None if self.task=="tracking": self.next_input_cur = None self.next_target_cur = None return with torch.cuda.stream(self.stream): self.next_input_pre = self.next_input_pre.cuda(non_blocking=True) self.next_target_pre = self.next_target_pre.cuda(non_blocking=True) if self.task=="tracking": self.next_input_cur = self.next_input_cur.cuda(non_blocking=True) self.next_target_cur = self.next_target_cur.cuda(non_blocking=True) def next(self): torch.cuda.current_stream().wait_stream(self.stream) input_pre = self.next_input_pre target_pre = self.next_target_pre input_cur = None target_cur = None if self.task=="tracking": input_cur = self.next_input_cur target_cur = self.next_target_cur if input_pre is not None: self.record_stream(input_pre) if target_pre is not None: target_pre.record_stream(torch.cuda.current_stream()) if self.task=="tracking": if input_cur is not None: self.record_stream(input_cur) if target_cur is not None: target_cur.record_stream(torch.cuda.current_stream()) self.preload() return input_pre,target_pre,input_cur,target_cur @staticmethod def _record_stream_for_image(input): input.record_stream(torch.cuda.current_stream()) def random_resize(data_loader, exp, epoch, rank, is_distributed): tensor = torch.LongTensor(1).cuda() if is_distributed: synchronize() if rank == 0: if epoch > exp.max_epoch - 10: size = exp.input_size else: size = random.randint(*exp.random_size) size = int(32 * size) tensor.fill_(size) if is_distributed: synchronize() dist.broadcast(tensor, 0) input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None) return ================================================ FILE: yolox/data/dataloading.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import torch from torch.utils.data.dataloader import DataLoader as torchDataLoader from torch.utils.data.dataloader import default_collate import os import random from .samplers import YoloBatchSampler def get_yolox_datadir(): """ get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set, this function will return value of the environment variable. Otherwise, use data """ yolox_datadir = os.getenv("YOLOX_DATADIR", None) if yolox_datadir is None: import yolox yolox_path = os.path.dirname(os.path.dirname(yolox.__file__)) yolox_datadir = os.path.join(yolox_path, "datasets") return yolox_datadir class DataLoader(torchDataLoader): """ Lightnet dataloader that enables on the fly resizing of the images. See :class:`torch.utils.data.DataLoader` for more information on the arguments. Check more on the following website: https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py Note: This dataloader only works with :class:`lightnet.data.Dataset` based datasets. Example: >>> class CustomSet(ln.data.Dataset): ... def __len__(self): ... return 4 ... @ln.data.Dataset.resize_getitem ... def __getitem__(self, index): ... # Should return (image, anno) but here we return (input_dim,) ... return (self.input_dim,) >>> dl = ln.data.DataLoader( ... CustomSet((200,200)), ... batch_size = 2, ... collate_fn = ln.data.list_collate # We want the data to be grouped as a list ... ) >>> dl.dataset.input_dim # Default input_dim (200, 200) >>> for d in dl: ... d [[(200, 200), (200, 200)]] [[(200, 200), (200, 200)]] >>> dl.change_input_dim(320, random_range=None) (320, 320) >>> for d in dl: ... d [[(320, 320), (320, 320)]] [[(320, 320), (320, 320)]] >>> dl.change_input_dim((480, 320), random_range=None) (480, 320) >>> for d in dl: ... d [[(480, 320), (480, 320)]] [[(480, 320), (480, 320)]] """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__initialized = False shuffle = False batch_sampler = None if len(args) > 5: shuffle = args[2] sampler = args[3] batch_sampler = args[4] elif len(args) > 4: shuffle = args[2] sampler = args[3] if "batch_sampler" in kwargs: batch_sampler = kwargs["batch_sampler"] elif len(args) > 3: shuffle = args[2] if "sampler" in kwargs: sampler = kwargs["sampler"] if "batch_sampler" in kwargs: batch_sampler = kwargs["batch_sampler"] else: if "shuffle" in kwargs: shuffle = kwargs["shuffle"] if "sampler" in kwargs: sampler = kwargs["sampler"] if "batch_sampler" in kwargs: batch_sampler = kwargs["batch_sampler"] # Use custom BatchSampler if batch_sampler is None: if sampler is None: if shuffle: sampler = torch.utils.data.sampler.RandomSampler(self.dataset) # sampler = torch.utils.data.DistributedSampler(self.dataset) else: sampler = torch.utils.data.sampler.SequentialSampler(self.dataset) batch_sampler = YoloBatchSampler( sampler, self.batch_size, self.drop_last, input_dimension=self.dataset.input_dim, ) # batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations = self.batch_sampler = batch_sampler self.__initialized = True def close_mosaic(self): self.batch_sampler.mosaic = False def change_input_dim(self, multiple=32, random_range=(10, 19)): """This function will compute a new size and update it on the next mini_batch. Args: multiple (int or tuple, optional): values to multiply the randomly generated range by. Default **32** random_range (tuple, optional): This (min, max) tuple sets the range for the randomisation; Default **(10, 19)** Return: tuple: width, height tuple with new dimension Note: The new size is generated as follows: |br| First we compute a random integer inside ``[random_range]``. We then multiply that number with the ``multiple`` argument, which gives our final new input size. |br| If ``multiple`` is an integer we generate a square size. If you give a tuple of **(width, height)**, the size is computed as :math:`rng * multiple[0], rng * multiple[1]`. Note: You can set the ``random_range`` argument to **None** to set an exact size of multiply. |br| See the example above for how this works. """ if random_range is None: size = 1 else: size = random.randint(*random_range) if isinstance(multiple, int): size = (size * multiple, size * multiple) else: size = (size * multiple[0], size * multiple[1]) self.batch_sampler.new_input_dim = size return size def list_collate(batch): """ Function that collates lists or tuples together into one list (of lists/tuples). Use this as the collate function in a Dataloader, if you want to have a list of items as an output, as opposed to tensors (eg. Brambox.boxes). """ items = list(zip(*batch)) for i in range(len(items)): if isinstance(items[i][0], (list, tuple)): items[i] = list(items[i]) else: items[i] = default_collate(items[i]) return items ================================================ FILE: yolox/data/samplers.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. import torch import torch.distributed as dist from torch.utils.data.sampler import BatchSampler as torchBatchSampler from torch.utils.data.sampler import Sampler import itertools from typing import Optional class YoloBatchSampler(torchBatchSampler): """ This batch sampler will generate mini-batches of (dim, index) tuples from another sampler. It works just like the :class:`torch.utils.data.sampler.BatchSampler`, but it will prepend a dimension, whilst ensuring it stays the same across one mini-batch. """ def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs): super().__init__(*args, **kwargs) self.input_dim = input_dimension self.new_input_dim = None self.mosaic = mosaic def __iter__(self): self.__set_input_dim() for batch in super().__iter__(): yield [(self.input_dim, idx, self.mosaic) for idx in batch] self.__set_input_dim() def __set_input_dim(self): """ This function randomly changes the the input dimension of the dataset. """ if self.new_input_dim is not None: self.input_dim = (self.new_input_dim[0], self.new_input_dim[1]) self.new_input_dim = None class InfiniteSampler(Sampler): """ In training, we only care about the "infinite stream" of training data. So this sampler produces an infinite stream of indices and all workers cooperate to correctly shuffle the indices and sample different indices. The samplers in each worker effectively produces `indices[worker_id::num_workers]` where `indices` is an infinite stream of indices consisting of `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True) or `range(size) + range(size) + ...` (if shuffle is False) """ def __init__( self, size: int, shuffle: bool = True, seed: Optional[int] = 0, rank=0, world_size=1, ): """ Args: size (int): the total number of data of the underlying dataset to sample from shuffle (bool): whether to shuffle the indices or not seed (int): the initial seed of the shuffle. Must be the same across all workers. If None, will use a random seed shared among workers (require synchronization among all workers). """ self._size = size assert size > 0 self._shuffle = shuffle self._seed = int(seed) if dist.is_available() and dist.is_initialized(): self._rank = dist.get_rank() self._world_size = dist.get_world_size() else: self._rank = rank self._world_size = world_size def __iter__(self): start = self._rank yield from itertools.islice( self._infinite_indices(), start, None, self._world_size ) def _infinite_indices(self): g = torch.Generator() g.manual_seed(self._seed) while True: if self._shuffle: yield from torch.randperm(self._size, generator=g) else: yield from torch.arange(self._size) def __len__(self): return self._size // self._world_size ================================================ FILE: yolox/evaluators/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. from .coco_evaluator import COCOEvaluator from .diffusion_mot_evaluator import DiffusionMOTEvaluator from .diffusion_mot_evaluator_kl import DiffusionMOTEvaluatorKL ================================================ FILE: yolox/evaluators/coco_evaluator.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) Megvii, Inc. and its affiliates. from loguru import logger from tqdm import tqdm import torch from yolox.utils import ( gather, is_main_process, postprocess, diffusion_postprocess, synchronize, time_synchronized, xyxy2xywh ) import contextlib import io import itertools import json import tempfile import time class COCOEvaluator: """ COCO AP Evaluation class. All the data in the val2017 dataset are processed and evaluated by COCO API. """ def __init__( self, dataloader, img_size, confthre, nmsthre3d, detthre, nmsthre2d, num_classes, testdev=False ): """ Args: dataloader (Dataloader): evaluate dataloader. img_size (int): image size after preprocess. images are resized to squares whose shape is (img_size, img_size). confthre (float): confidence threshold ranging from 0 to 1, which is defined in the config file. nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1. """ self.dataloader = dataloader self.img_size = img_size self.confthre = confthre self.nmsthre3d = nmsthre3d self.detthre=detthre self.nmsthre2d=nmsthre2d self.num_classes = num_classes self.testdev = testdev def evaluate( self, model, distributed=False, half=False, trt_file=None, decoder=None, test_size=None, ): """ COCO average precision (AP) Evaluation. Iterate inference on the test dataset and the results are evaluated by COCO API. NOTE: This function will change training mode to False, please save states if needed. Args: model : model to evaluate. Returns: ap50_95 (float) : COCO AP of IoU=50:95 ap50 (float) : COCO AP of IoU=50 summary (sr): summary info of evaluation. """ # TODO half to amp_test tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor model = model.eval() if half: model = model.half() ids = [] data_list = [] progress_bar = tqdm if is_main_process() else iter inference_time = 0 nms_time = 0 n_samples = len(self.dataloader) - 1 if trt_file is not None: from torch2trt import TRTModule model_trt = TRTModule() model_trt.load_state_dict(torch.load(trt_file)) x = torch.ones(1, 3, test_size[0], test_size[1]).cuda() model(x) model = model_trt for cur_iter, (imgs,targets,info_imgs, ids) in enumerate( progress_bar(self.dataloader) ): with torch.no_grad(): imgs = imgs.type(tensor_type) targets=targets.type(tensor_type) # skip the the last iters since batchsize might be not enough for batch inference is_time_record = cur_iter < len(self.dataloader) - 1 if is_time_record: start = time.time() bboxes=targets[...,1:5] outputs = model((imgs,None),(None,None)) if decoder is not None: outputs = decoder(outputs, dtype=outputs.type()) if is_time_record: infer_end = time_synchronized() inference_time += infer_end - start pre_outputs,cur_outputs=torch.split(outputs[0],len(outputs[0])//2) outputs = diffusion_postprocess( pre_outputs,cur_outputs,outputs[1],conf_thre=self.confthre, det_thre=self.detthre,nms_thre3d=self.nmsthre3d,nms_thre2d=self.nmsthre2d ) if is_time_record: nms_end = time_synchronized() nms_time += nms_end - infer_end data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids)) statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples]) if distributed: data_list = gather(data_list, dst=0) data_list = list(itertools.chain(*data_list)) torch.distributed.reduce(statistics, dst=0) eval_results = self.evaluate_prediction(data_list, statistics) synchronize() return eval_results def convert_to_coco_format(self, outputs, info_imgs, ids): data_list = [] for (output, img_h, img_w, img_id) in zip( outputs, info_imgs[0], info_imgs[1], ids ): if output is None: continue output = output.cpu() bboxes = output[:, 0:4] # preprocessing: resize scale = min( self.img_size[0] / float(img_h), self.img_size[1] / float(img_w) ) bboxes /= scale bboxes = xyxy2xywh(bboxes) cls = output[:, 6] scores = output[:, 4] * output[:, 5] for ind in range(bboxes.shape[0]): label = self.dataloader.dataset.class_ids[int(cls[ind])] pred_data = { "image_id": int(img_id), "category_id": label, "bbox": bboxes[ind].numpy().tolist(), "score": scores[ind].numpy().item(), "segmentation": [], } # COCO json format data_list.append(pred_data) return data_list def evaluate_prediction(self, data_dict, statistics): if not is_main_process(): return 0, 0, None logger.info("Evaluate in main process...") annType = ["segm", "bbox", "keypoints"] inference_time = statistics[0].item() nms_time = statistics[1].item() n_samples = statistics[2].item() a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size) a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size) time_info = ", ".join( [ "Average {} time: {:.2f} ms".format(k, v) for k, v in zip( ["forward", "NMS", "inference"], [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)], ) ] ) info = time_info + "\n" # Evaluate the Dt (detection) json comparing with the ground truth if len(data_dict) > 0: cocoGt = self.dataloader.dataset.coco # TODO: since pycocotools can't process dict in py36, write data to json file. if self.testdev: json.dump(data_dict, open("./yolox_testdev_2017.json", "w")) cocoDt = cocoGt.loadRes("./yolox_testdev_2017.json") else: _, tmp = tempfile.mkstemp() json.dump(data_dict, open(tmp, "w")) cocoDt = cocoGt.loadRes(tmp) ''' try: from yolox.layers import COCOeval_opt as COCOeval except ImportError: from pycocotools import cocoeval as COCOeval logger.warning("Use standard COCOeval.") ''' #from pycocotools.cocoeval import COCOeval from yolox.layers import COCOeval_opt as COCOeval cocoEval = COCOeval(cocoGt, cocoDt, annType[1]) cocoEval.evaluate() cocoEval.accumulate() redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): cocoEval.summarize() info += redirect_string.getvalue() return cocoEval.stats[0], cocoEval.stats[1], info else: return 0, 0, info ================================================ FILE: yolox/evaluators/diffusion_mot_evaluator.py ================================================ from collections import defaultdict from loguru import logger from tqdm import tqdm import torch from yolox.utils import ( gather, is_main_process, postprocess, synchronize, time_synchronized, xyxy2xywh ) from yolox.tracker.diffusion_tracker import DiffusionTracker from yolox.models import YOLOXHead import contextlib import io import os import itertools import json import tempfile import time import numpy as np def write_results(filename, results): save_format = '{frame},{id},{x1:.1f},{y1:.1f},{w:.1f},{h:.1f},{s:.2f},-1,-1,-1\n' with open(filename, 'w') as f: for frame_id, tlwhs, track_ids, scores in results: for tlwh, track_id, score in zip(tlwhs, track_ids, scores): if track_id < 0: continue x1, y1, w, h = tlwh line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, w=w, h=h, s=score) f.write(line) logger.info('save results to {}'.format(filename)) def write_results_no_score(filename, results): save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n' with open(filename, 'w') as f: for frame_id, tlwhs, track_ids in results: for tlwh, track_id in zip(tlwhs, track_ids): if track_id < 0: continue x1, y1, w, h = tlwh line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1)) f.write(line) logger.info('save results to {}'.format(filename)) class DiffusionMOTEvaluator: """ COCO AP Evaluation class. All the data in the val2017 dataset are processed and evaluated by COCO API. """ def __init__( self, args, dataloader, img_size, confthre, nmsthre3d, detthre,nmsthre2d,interval, num_classes): """ Args: dataloader (Dataloader): evaluate dataloader. img_size (int): image size after preprocess. images are resized to squares whose shape is (img_size, img_size). confthre (float): confidence threshold ranging from 0 to 1, which is defined in the config file. nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1. """ self.dataloader = dataloader self.img_size = img_size self.confthre = confthre self.nmsthre3d = nmsthre3d self.detthre=detthre self.nmsthre2d=nmsthre2d self.num_classes = num_classes self.association_interval=interval self.args = args def evaluate( self, model, distributed=False, half=False, trt_file=None, decoder=None, test_size=None, result_folder=None ): """ COCO average precision (AP) Evaluation. Iterate inference on the test dataset and the results are evaluated by COCO API. NOTE: This function will change training mode to False, please save states if needed. Args: model : model to evaluate. Returns: ap50_95 (float) : COCO AP of IoU=50:95 ap50 (float) : COCO AP of IoU=50 summary (sr): summary info of evaluation. """ # TODO half to amp_test tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor model = model.eval() if half: model = model.half() ids = [] data_list = [] results = [] seq_ids=[] seq_info_imgs=[] seq_frame_ids=[] video_names = defaultdict() ori_detthre=self.detthre ori_confthre=self.confthre progress_bar = tqdm if is_main_process() else iter track_time = 0 n_samples = len(self.dataloader) - 1 if trt_file is not None: from torch2trt import TRTModule model_trt = TRTModule() model_trt.load_state_dict(torch.load(trt_file)) x = torch.ones(1, 3, test_size[0], test_size[1]).cuda() model(x) model = model_trt tracker = DiffusionTracker(model,tensor_type) for cur_iter, (imgs, _, info_imgs, ids) in enumerate( progress_bar(self.dataloader) ): with torch.no_grad(): # init tracker frame_id = info_imgs[2].item() video_id = info_imgs[3].item() img_file_name = info_imgs[4] video_name = img_file_name[0].split('/')[0] if video_name not in video_names: video_names[video_id] = video_name self.detthre=ori_detthre self.confthre=ori_confthre # if video_name == 'MOT17-02-FRCNN' or video_name == 'MOT17-01-FRCNN': # #self.association_interval = 1 # self.confthre = 0.6 # elif video_name == 'MOT17-04-FRCNN' or video_name=='MOT17-03-FRCNN': # self.detthre = 0.5 # #self.association_interval = 1 # self.confthre = 0.4 # elif video_name == 'MOT17-05-FRCNN' or video_name == 'MOT17-06-FRCNN': # #self.association_interval = 1 # self.confthre = 0.4 # elif video_name == 'MOT17-09-FRCNN' or video_name == 'MOT17-07-FRCNN': # self.confthre = 0.3 # self.detthre = 0.5 # self.nmsthre3d = 0.6 # elif video_name == 'MOT17-10-FRCNN' or video_name == 'MOT17-8-FRCNN': # self.confthre = 0.4 # self.nmsthre3d = 0.5 # elif video_name == 'MOT17-11-FRCNN' or video_name == 'MOT17-12-FRCNN': # self.confthre = 0.5 # elif video_name == 'MOT17-13-FRCNN' or video_name == 'MOT17-14-FRCNN': # self.confthre = 0.5 # self.detthre = 0.5 # if video_name =="MOT20-06" or video_name=="MOT20-08": # self.detthre=0.3 # if video_name!="MOT20-01" and video_name!="MOT20-02": # continue if frame_id == 1: if len(seq_ids) != 0: outputs=tracker.get_results() track_time+=tracker.total_time result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1])) for output,info_img,id,cur_frame_id in zip(outputs,seq_info_imgs,seq_ids,seq_frame_ids): output_results,scale = self.convert_to_coco_format(output, info_img, id) data_list.extend(output_results) # run tracking online_tlwhs = [] online_ids = [] online_scores = [] for tid,obj in zip(*output): xyxy = obj[:4]/scale tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]] vertical = tlwh[2] / tlwh[3] > 1.6 if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical: online_tlwhs.append(tlwh) online_ids.append(tid) online_scores.append(obj[4]) # save results results.append((cur_frame_id, online_tlwhs, online_ids, online_scores)) write_results(result_filename, results) results = [] seq_ids=[] seq_info_imgs=[] seq_frame_ids=[] tracker = DiffusionTracker(model,tensor_type,self.confthre,self.detthre,self.nmsthre3d,self.nmsthre2d,self.association_interval) imgs = imgs.type(tensor_type) # skip the the last iters since batchsize might be not enough for batch inference tracker.update(imgs) seq_ids.append(ids) seq_info_imgs.append(info_imgs) seq_frame_ids.append(frame_id) if cur_iter == len(self.dataloader) - 1: result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id])) outputs=tracker.get_results() track_time+=tracker.total_time for output,info_img,id,cur_frame_id in zip(outputs,seq_info_imgs,seq_ids,seq_frame_ids): output_results,scale = self.convert_to_coco_format(output, info_img, id) data_list.extend(output_results) # run tracking online_tlwhs = [] online_ids = [] online_scores = [] for tid,obj in zip(*output): xyxy = obj[:4]/scale tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]] vertical = tlwh[2] / tlwh[3] > 1.6 if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical: online_tlwhs.append(tlwh) online_ids.append(tid) online_scores.append(obj[4]) # save results results.append((cur_frame_id, online_tlwhs, online_ids, online_scores)) write_results(result_filename, results) print("diffusion track fps : {}".format(n_samples*2/track_time)) statistics = torch.cuda.FloatTensor([0, track_time, n_samples]) if distributed: data_list = gather(data_list, dst=0) data_list = list(itertools.chain(*data_list)) torch.distributed.reduce(statistics, dst=0) eval_results = self.evaluate_prediction(data_list, statistics) synchronize() return eval_results def convert_to_coco_format(self, output, info_imgs, ids): data_list = [] scale = min( self.img_size[0] / float(info_imgs[0]), self.img_size[1] / float(info_imgs[1]) ) bboxes = [] clses = [] scores = [] if len(output[1])>0: for t in output[1]: bboxes.append(t[:4]) clses.append(0) scores.append(t[4]) bboxes=np.array(bboxes) bboxes /= scale bboxes = xyxy2xywh(bboxes) for ind in range(len(bboxes)): label = self.dataloader.dataset.class_ids[int(clses[ind])] pred_data = { "image_id": int(ids[0]), "category_id": label, "bbox": bboxes[ind].tolist(), "score": float(scores[ind]), "segmentation": [], } # COCO json format data_list.append(pred_data) return data_list,scale def evaluate_prediction(self, data_dict, statistics): if not is_main_process(): return 0, 0, None logger.info("Evaluate in main process...") annType = ["segm", "bbox", "keypoints"] inference_time = statistics[0].item() track_time = statistics[1].item() n_samples = statistics[2].item() a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size) a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size) time_info = ", ".join( [ "Average {} time: {:.2f} ms".format(k, v) for k, v in zip( ["forward", "track", "inference"], [a_infer_time, a_track_time, (a_infer_time + a_track_time)], ) ] ) info = time_info + "\n" # Evaluate the Dt (detection) json comparing with the ground truth if len(data_dict) > 0: cocoGt = self.dataloader.dataset.coco # TODO: since pycocotools can't process dict in py36, write data to json file. _, tmp = tempfile.mkstemp() json.dump(data_dict, open(tmp, "w")) cocoDt = cocoGt.loadRes(tmp) ''' try: from yolox.layers import COCOeval_opt as COCOeval except ImportError: from pycocotools import cocoeval as COCOeval logger.warning("Use standard COCOeval.") ''' #from pycocotools.cocoeval import COCOeval from yolox.layers import COCOeval_opt as COCOeval cocoEval = COCOeval(cocoGt, cocoDt, annType[1]) cocoEval.evaluate() cocoEval.accumulate() redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): cocoEval.summarize() info += redirect_string.getvalue() return cocoEval.stats[0], cocoEval.stats[1], info else: return 0, 0, info ================================================ FILE: yolox/evaluators/diffusion_mot_evaluator_kl.py ================================================ from collections import defaultdict from loguru import logger from tqdm import tqdm import torch from yolox.utils import ( gather, is_main_process, postprocess, synchronize, time_synchronized, xyxy2xywh ) from yolox.tracker.diffusion_tracker_kl import DiffusionTracker from yolox.models import YOLOXHead import contextlib import io import os import itertools import json import tempfile import time import numpy as np import cv2 from yolox.utils.visualize import plot_tracking def write_results(filename, results): save_format = '{frame},{id},{x1:.1f},{y1:.1f},{w:.1f},{h:.1f},{s:.2f},-1,-1,-1\n' with open(filename, 'w') as f: for frame_id, tlwhs, track_ids, scores in results: for tlwh, track_id, score in zip(tlwhs, track_ids, scores): if track_id < 0: continue x1, y1, w, h = tlwh line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, w=w, h=h, s=score) f.write(line) logger.info('save results to {}'.format(filename)) def write_results_no_score(filename, results): save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\n' with open(filename, 'w') as f: for frame_id, tlwhs, track_ids in results: for tlwh, track_id in zip(tlwhs, track_ids): if track_id < 0: continue x1, y1, w, h = tlwh line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1)) f.write(line) logger.info('save results to {}'.format(filename)) class DiffusionMOTEvaluatorKL: """ COCO AP Evaluation class. All the data in the val2017 dataset are processed and evaluated by COCO API. """ def __init__( self, args, dataloader, img_size, confthre, nmsthre3d, detthre,nmsthre2d,interval, num_classes): """ Args: dataloader (Dataloader): evaluate dataloader. img_size (int): image size after preprocess. images are resized to squares whose shape is (img_size, img_size). confthre (float): confidence threshold ranging from 0 to 1, which is defined in the config file. nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1. """ self.dataloader = dataloader self.img_size = img_size self.confthre = confthre self.nmsthre3d = nmsthre3d self.detthre=detthre self.nmsthre2d=nmsthre2d self.num_classes = num_classes self.association_interval=interval self.args = args def evaluate( self, model, distributed=False, half=False, trt_file=None, decoder=None, test_size=None, result_folder=None ): """ COCO average precision (AP) Evaluation. Iterate inference on the test dataset and the results are evaluated by COCO API. NOTE: This function will change training mode to False, please save states if needed. Args: model : model to evaluate. Returns: ap50_95 (float) : COCO AP of IoU=50:95 ap50 (float) : COCO AP of IoU=50 summary (sr): summary info of evaluation. """ # TODO half to amp_test tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor model = model.eval() if half: model = model.half() ids = [] data_list = [] results = [] # seq_ids=[] # seq_info_imgs=[] # seq_frame_ids=[] video_names = defaultdict() ori_detthre=self.detthre ori_confthre=self.confthre progress_bar = tqdm if is_main_process() else iter track_time = 0 n_samples = len(self.dataloader) - 1 if trt_file is not None: from torch2trt import TRTModule model_trt = TRTModule() model_trt.load_state_dict(torch.load(trt_file)) x = torch.ones(1, 3, test_size[0], test_size[1]).cuda() model(x) model = model_trt tracker = DiffusionTracker(model,tensor_type) for cur_iter, (imgs, _, info_imgs, ids) in enumerate( progress_bar(self.dataloader) ): with torch.no_grad(): # init tracker frame_id = info_imgs[2].item() video_id = info_imgs[3].item() img_file_name = info_imgs[4] video_name = img_file_name[0].split('/')[0] # if not ("MOT17-12" in video_name or "MOT17-14" in video_name): # continue if video_name not in video_names: video_names[video_id] = video_name self.detthre=ori_detthre self.confthre=ori_confthre # if video_name =="MOT20-06" or video_name=="MOT20-08": # self.detthre=0.4 # if video_name!="dancetrack0007": # continue if frame_id == 1: # text_path="DiffusionTrack_outputs/yolox_x_diffusion_track_mot20/track_results_mot20_test/{}.txt".format(video_name) # scale = min( # 896 / float(info_imgs[0]), 1600 / float(info_imgs[1]) # ) # detections=defaultdict(list) # with open(text_path,'r') as f: # for line in f.readlines(): # data=line.strip().split(',') # detections[int(data[0])].append([float(data[2])*scale,float(data[3])*scale,(float(data[4])+float(data[2]))*scale,(float(data[5])+float(data[3]))*scale,1,float(data[6])]) detections=None tracker = DiffusionTracker(model,tensor_type,self.confthre,self.detthre,self.nmsthre3d,self.nmsthre2d,self.association_interval,detections) if len(results) != 0: result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1])) write_results(result_filename, results) results = [] # skip the the last iters since batchsize might be not enough for batch inference imgs = imgs.type(tensor_type) output,association_time=tracker.update(imgs) track_time+=association_time output_results,scale = self.convert_to_coco_format(output, info_imgs, ids) data_list.extend(output_results) # run tracking online_tlwhs = [] online_ids = [] online_scores = [] for t in output: tlwh = t._tlwh/scale # tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]] vertical = tlwh[2] / tlwh[3] > 1.6 if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical: online_tlwhs.append(tlwh) online_ids.append(t.track_id) online_scores.append(t.score) # save results # image_path=os.path.join("DiffusionTrack/datasets/dancetrack/train",info_imgs[4][0]) # raw_image= cv2.imread(image_path) # online_im = plot_tracking( # raw_image, online_tlwhs, online_ids, frame_id=frame_id, fps=30 # ) # os.makedirs("DiffusionTrack/vis_fold/{}".format(video_name),exist_ok=True) # cv2.imwrite("DiffusionTrack/vis_fold/{}/{:0>5d}.jpg".format(video_name,frame_id),online_im) results.append((frame_id, online_tlwhs, online_ids, online_scores)) if cur_iter == len(self.dataloader) - 1: result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id])) write_results(result_filename, results) print("diffusion track fps : {}".format(2*n_samples/track_time)) statistics = torch.cuda.FloatTensor([0, track_time, n_samples]) if distributed: data_list = gather(data_list, dst=0) data_list = list(itertools.chain(*data_list)) torch.distributed.reduce(statistics, dst=0) eval_results = self.evaluate_prediction(data_list, statistics) synchronize() return eval_results def convert_to_coco_format(self, output, info_imgs, ids): data_list = [] scale = min( self.img_size[0] / float(info_imgs[0]), self.img_size[1] / float(info_imgs[1]) ) bboxes = [] clses = [] scores = [] if len(output)>0: for t in output: bboxes.append(t._tlwh) clses.append(0) scores.append(t.score) bboxes=np.array(bboxes) bboxes /= scale # bboxes = xyxy2xywh(bboxes) for ind in range(len(bboxes)): label = self.dataloader.dataset.class_ids[int(clses[ind])] pred_data = { "image_id": int(ids[0]), "category_id": label, "bbox": bboxes[ind].tolist(), "score": float(scores[ind]), "segmentation": [], } # COCO json format data_list.append(pred_data) return data_list,scale def evaluate_prediction(self, data_dict, statistics): if not is_main_process(): return 0, 0, None logger.info("Evaluate in main process...") annType = ["segm", "bbox", "keypoints"] inference_time = statistics[0].item() track_time = statistics[1].item() n_samples = statistics[2].item() a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size) a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size) time_info = ", ".join( [ "Average {} time: {:.2f} ms".format(k, v) for k, v in zip( ["forward", "track", "inference"], [a_infer_time, a_track_time, (a_infer_time + a_track_time)], ) ] ) info = time_info + "\n" # Evaluate the Dt (detection) json comparing with the ground truth if len(data_dict) > 0: cocoGt = self.dataloader.dataset.coco # TODO: since pycocotools can't process dict in py36, write data to json file. _, tmp = tempfile.mkstemp() json.dump(data_dict, open(tmp, "w")) cocoDt = cocoGt.loadRes(tmp) ''' try: from yolox.layers import COCOeval_opt as COCOeval except ImportError: from pycocotools import cocoeval as COCOeval logger.warning("Use standard COCOeval.") ''' #from pycocotools.cocoeval import COCOeval from yolox.layers import COCOeval_opt as COCOeval cocoEval = COCOeval(cocoGt, cocoDt, annType[1]) cocoEval.evaluate() cocoEval.accumulate() redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): cocoEval.summarize() info += redirect_string.getvalue() return cocoEval.stats[0], cocoEval.stats[1], info else: return 0, 0, info ================================================ FILE: yolox/evaluators/evaluation.py ================================================ import os import numpy as np import copy import motmetrics as mm mm.lap.default_solver = 'lap' class Evaluator(object): def __init__(self, data_root, seq_name, data_type): self.data_root = data_root self.seq_name = seq_name self.data_type = data_type self.load_annotations() self.reset_accumulator() def load_annotations(self): assert self.data_type == 'mot' gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) def reset_accumulator(self): self.acc = mm.MOTAccumulator(auto_id=True) def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): # results trk_tlwhs = np.copy(trk_tlwhs) trk_ids = np.copy(trk_ids) # gts gt_objs = self.gt_frame_dict.get(frame_id, []) gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] # ignore boxes ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) ignore_tlwhs = unzip_objs(ignore_objs)[0] # remove ignored results keep = np.ones(len(trk_tlwhs), dtype=bool) iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) if len(iou_distance) > 0: match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) match_ious = iou_distance[match_is, match_js] match_js = np.asarray(match_js, dtype=int) match_js = match_js[np.logical_not(np.isnan(match_ious))] keep[match_js] = False trk_tlwhs = trk_tlwhs[keep] trk_ids = trk_ids[keep] #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) #match_ious = iou_distance[match_is, match_js] #match_js = np.asarray(match_js, dtype=int) #match_js = match_js[np.logical_not(np.isnan(match_ious))] #keep[match_js] = False #trk_tlwhs = trk_tlwhs[keep] #trk_ids = trk_ids[keep] # get distance matrix iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) # acc self.acc.update(gt_ids, trk_ids, iou_distance) if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics else: events = None return events def eval_file(self, filename): self.reset_accumulator() result_frame_dict = read_results(filename, self.data_type, is_gt=False) #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) frames = sorted(list(set(result_frame_dict.keys()))) for frame_id in frames: trk_objs = result_frame_dict.get(frame_id, []) trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) return self.acc @staticmethod def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): names = copy.deepcopy(names) if metrics is None: metrics = mm.metrics.motchallenge_metrics metrics = copy.deepcopy(metrics) mh = mm.metrics.create() summary = mh.compute_many( accs, metrics=metrics, names=names, generate_overall=True ) return summary @staticmethod def save_summary(summary, filename): import pandas as pd writer = pd.ExcelWriter(filename) summary.to_excel(writer) writer.save() def read_results(filename, data_type: str, is_gt=False, is_ignore=False): if data_type in ('mot', 'lab'): read_fun = read_mot_results else: raise ValueError('Unknown data type: {}'.format(data_type)) return read_fun(filename, is_gt, is_ignore) """ labels={'ped', ... % 1 'person_on_vhcl', ... % 2 'car', ... % 3 'bicycle', ... % 4 'mbike', ... % 5 'non_mot_vhcl', ... % 6 'static_person', ... % 7 'distractor', ... % 8 'occluder', ... % 9 'occluder_on_grnd', ... %10 'occluder_full', ... % 11 'reflection', ... % 12 'crowd' ... % 13 }; """ def read_mot_results(filename, is_gt, is_ignore): valid_labels = {1} ignore_labels = {2, 7, 8, 12} results_dict = dict() if os.path.isfile(filename): with open(filename, 'r') as f: for line in f.readlines(): linelist = line.split(',') if len(linelist) < 7: continue fid = int(linelist[0]) if fid < 1: continue results_dict.setdefault(fid, list()) box_size = float(linelist[4]) * float(linelist[5]) if is_gt: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) mark = int(float(linelist[6])) if mark == 0 or label not in valid_labels: continue score = 1 elif is_ignore: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) vis_ratio = float(linelist[8]) if label not in ignore_labels and vis_ratio >= 0: continue else: continue score = 1 else: score = float(linelist[6]) #if box_size > 7000: #if box_size <= 7000 or box_size >= 15000: #if box_size < 15000: #continue tlwh = tuple(map(float, linelist[2:6])) target_id = int(linelist[1]) results_dict[fid].append((tlwh, target_id, score)) return results_dict def unzip_objs(objs): if len(objs) > 0: tlwhs, ids, scores = zip(*objs) else: tlwhs, ids, scores = [], [], [] tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) return tlwhs, ids, scores ================================================ FILE: yolox/exp/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from .base_exp import BaseExp from .build import get_exp from .yolox_base import Exp ================================================ FILE: yolox/exp/base_exp.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch from torch.nn import Module from yolox.utils import LRScheduler import ast import pprint from abc import ABCMeta, abstractmethod from tabulate import tabulate from typing import Dict class BaseExp(metaclass=ABCMeta): """Basic class for any experiment.""" def __init__(self): self.seed = None self.output_dir = "./DiffusionTrack_outputs" self.print_interval = 100 self.eval_interval = 10 @abstractmethod def get_model(self) -> Module: pass @abstractmethod def get_data_loader( self, batch_size: int, is_distributed: bool ) -> Dict[str, torch.utils.data.DataLoader]: pass @abstractmethod def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer: pass @abstractmethod def get_lr_scheduler( self, lr: float, iters_per_epoch: int, **kwargs ) -> LRScheduler: pass @abstractmethod def get_evaluator(self): pass @abstractmethod def eval(self, model, evaluator, weights): pass def __repr__(self): table_header = ["keys", "values"] exp_table = [ (str(k), pprint.pformat(v)) for k, v in vars(self).items() if not k.startswith("_") ] return tabulate(exp_table, headers=table_header, tablefmt="fancy_grid") def merge(self, cfg_list): assert len(cfg_list) % 2 == 0 for k, v in zip(cfg_list[0::2], cfg_list[1::2]): # only update value with same key if hasattr(self, k): src_value = getattr(self, k) src_type = type(src_value) if src_value is not None and src_type != type(v): try: v = src_type(v) except Exception: v = ast.literal_eval(v) setattr(self, k, v) ================================================ FILE: yolox/exp/build.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import importlib import os import sys def get_exp_by_file(exp_file): try: sys.path.append(os.path.dirname(exp_file)) current_exp = importlib.import_module(os.path.basename(exp_file).split(".")[0]) exp = current_exp.Exp() except Exception: raise ImportError("{} doesn't contains class named 'Exp'".format(exp_file)) return exp def get_exp_by_name(exp_name): import yolox yolox_path = os.path.dirname(os.path.dirname(yolox.__file__)) filedict = { "yolox-s": "yolox_s.py", "yolox-m": "yolox_m.py", "yolox-l": "yolox_l.py", "yolox-x": "yolox_x.py", "yolox-tiny": "yolox_tiny.py", "yolox-nano": "nano.py", "yolov3": "yolov3.py", } filename = filedict[exp_name] exp_path = os.path.join(yolox_path, "exps", "default", filename) return get_exp_by_file(exp_path) def get_exp(exp_file, exp_name): """ get Exp object by file or name. If exp_file and exp_name are both provided, get Exp by exp_file. Args: exp_file (str): file path of experiment. exp_name (str): name of experiment. "yolo-s", """ assert ( exp_file is not None or exp_name is not None ), "plz provide exp file or exp name." if exp_file is not None: return get_exp_by_file(exp_file) else: return get_exp_by_name(exp_name) ================================================ FILE: yolox/exp/yolox_base.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.distributed as dist import torch.nn as nn import os import random from .base_exp import BaseExp class Exp(BaseExp): def __init__(self): super().__init__() # ---------------- model config ---------------- # self.num_classes = 80 self.depth = 1.00 self.width = 1.00 # ---------------- dataloader config ---------------- # # set worker to 4 for shorter dataloader init time self.data_num_workers = 4 self.input_size = (640, 640) self.random_size = (14, 26) self.train_ann = "instances_train2017.json" self.val_ann = "instances_val2017.json" # --------------- transform config ----------------- # self.degrees = 10.0 self.translate = 0.1 self.scale = (0.1, 2) self.mscale = (0.8, 1.6) self.shear = 2.0 self.perspective = 0.0 self.enable_mixup = True # -------------- training config --------------------- # self.warmup_epochs = 5 self.max_epoch = 300 self.warmup_lr = 0 self.basic_lr_per_img = 0.01 / 64.0 self.scheduler = "yoloxwarmcos" self.no_aug_epochs = 15 self.min_lr_ratio = 0.05 self.ema = True self.weight_decay = 5e-4 self.momentum = 0.9 self.print_interval = 10 self.eval_interval = 10 self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0] # ----------------- testing config ------------------ # self.test_size = (640, 640) # self.test_conf = 0.001 # self.nmsthre = 0.65 self.random_flip=False def get_model(self): from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead def init_yolo(M): for m in M.modules(): if isinstance(m, nn.BatchNorm2d): m.eps = 1e-3 m.momentum = 0.03 if getattr(self, "model", None) is None: in_channels = [256, 512, 1024] backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels) head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels) self.model = YOLOX(backbone, head) self.model.apply(init_yolo) self.model.head.initialize_biases(1e-2) return self.model def get_data_loader(self, batch_size, is_distributed, no_aug=False): from yolox.data import ( COCODataset, DataLoader, InfiniteSampler, MosaicDetection, TrainTransform, YoloBatchSampler ) dataset = COCODataset( data_dir=None, json_file=self.train_ann, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=50, ), ) dataset = MosaicDetection( dataset, mosaic=not no_aug, img_size=self.input_size, preproc=TrainTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_labels=120, ), degrees=self.degrees, translate=self.translate, scale=self.scale, shear=self.shear, perspective=self.perspective, enable_mixup=self.enable_mixup, ) self.dataset = dataset if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0) batch_sampler = YoloBatchSampler( sampler=sampler, batch_size=batch_size, drop_last=False, input_dimension=self.input_size, mosaic=not no_aug, ) dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True} dataloader_kwargs["batch_sampler"] = batch_sampler train_loader = DataLoader(self.dataset, **dataloader_kwargs) return train_loader def random_resize(self, data_loader, epoch, rank, is_distributed): tensor = torch.LongTensor(2).cuda() if rank == 0: size_factor = self.input_size[1] * 1.0 / self.input_size[0] size = random.randint(*self.random_size) size = (int(32 * size), 32 * int(size * size_factor)) tensor[0] = size[0] tensor[1] = size[1] if is_distributed: dist.barrier() dist.broadcast(tensor, 0) input_size = data_loader.change_input_dim( multiple=(tensor[0].item(), tensor[1].item()), random_range=None ) return input_size def get_optimizer(self, batch_size): if "optimizer" not in self.__dict__: if self.warmup_epochs > 0: lr = self.warmup_lr else: lr = self.basic_lr_per_img * batch_size pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in self.model.named_modules(): if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d) or "bn" in k: pg0.append(v.weight) # no decay elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay optimizer = torch.optim.SGD( pg0, lr=lr, momentum=self.momentum, nesterov=True ) optimizer.add_param_group( {"params": pg1, "weight_decay": self.weight_decay} ) # add pg1 with weight_decay optimizer.add_param_group({"params": pg2}) self.optimizer = optimizer return self.optimizer def get_lr_scheduler(self, lr, iters_per_epoch): from yolox.utils import LRScheduler scheduler = LRScheduler( self.scheduler, lr, iters_per_epoch, self.max_epoch, warmup_epochs=self.warmup_epochs, warmup_lr_start=self.warmup_lr, no_aug_epochs=self.no_aug_epochs, min_lr_ratio=self.min_lr_ratio, ) return scheduler def get_eval_loader(self, batch_size, is_distributed, testdev=False): from yolox.data import COCODataset, ValTransform valdataset = COCODataset( data_dir=None, json_file=self.val_ann if not testdev else "image_info_test-dev2017.json", name="val2017" if not testdev else "test2017", img_size=self.test_size, preproc=ValTransform( rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225) ), ) if is_distributed: batch_size = batch_size // dist.get_world_size() sampler = torch.utils.data.distributed.DistributedSampler( valdataset, shuffle=False ) else: sampler = torch.utils.data.SequentialSampler(valdataset) dataloader_kwargs = { "num_workers": self.data_num_workers, "pin_memory": True, "sampler": sampler, } dataloader_kwargs["batch_size"] = batch_size val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs) return val_loader def get_evaluator(self, batch_size, is_distributed, testdev=False): from yolox.evaluators import COCOEvaluator val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev) evaluator = COCOEvaluator( dataloader=val_loader, img_size=self.test_size, confthre=self.test_conf, nmsthre=self.nmsthre, num_classes=self.num_classes, testdev=testdev, ) return evaluator def eval(self, model, evaluator, is_distributed, half=False): return evaluator.evaluate(model, is_distributed, half) ================================================ FILE: yolox/layers/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from .fast_coco_eval_api import COCOeval_opt ================================================ FILE: yolox/layers/csrc/cocoeval/cocoeval.cpp ================================================ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #include "cocoeval.h" #include #include #include #include using namespace pybind11::literals; namespace COCOeval { // Sort detections from highest score to lowest, such that // detection_instances[detection_sorted_indices[t]] >= // detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match // original COCO API void SortInstancesByDetectionScore( const std::vector& detection_instances, std::vector* detection_sorted_indices) { detection_sorted_indices->resize(detection_instances.size()); std::iota( detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); std::stable_sort( detection_sorted_indices->begin(), detection_sorted_indices->end(), [&detection_instances](size_t j1, size_t j2) { return detection_instances[j1].score > detection_instances[j2].score; }); } // Partition the ground truth objects based on whether or not to ignore them // based on area void SortInstancesByIgnore( const std::array& area_range, const std::vector& ground_truth_instances, std::vector* ground_truth_sorted_indices, std::vector* ignores) { ignores->clear(); ignores->reserve(ground_truth_instances.size()); for (auto o : ground_truth_instances) { ignores->push_back( o.ignore || o.area < area_range[0] || o.area > area_range[1]); } ground_truth_sorted_indices->resize(ground_truth_instances.size()); std::iota( ground_truth_sorted_indices->begin(), ground_truth_sorted_indices->end(), 0); std::stable_sort( ground_truth_sorted_indices->begin(), ground_truth_sorted_indices->end(), [&ignores](size_t j1, size_t j2) { return (int)(*ignores)[j1] < (int)(*ignores)[j2]; }); } // For each IOU threshold, greedily match each detected instance to a ground // truth instance (if possible) and store the results void MatchDetectionsToGroundTruth( const std::vector& detection_instances, const std::vector& detection_sorted_indices, const std::vector& ground_truth_instances, const std::vector& ground_truth_sorted_indices, const std::vector& ignores, const std::vector>& ious, const std::vector& iou_thresholds, const std::array& area_range, ImageEvaluation* results) { // Initialize memory to store return data matches and ignore const int num_iou_thresholds = iou_thresholds.size(); const int num_ground_truth = ground_truth_sorted_indices.size(); const int num_detections = detection_sorted_indices.size(); std::vector ground_truth_matches( num_iou_thresholds * num_ground_truth, 0); std::vector& detection_matches = results->detection_matches; std::vector& detection_ignores = results->detection_ignores; std::vector& ground_truth_ignores = results->ground_truth_ignores; detection_matches.resize(num_iou_thresholds * num_detections, 0); detection_ignores.resize(num_iou_thresholds * num_detections, false); ground_truth_ignores.resize(num_ground_truth); for (auto g = 0; g < num_ground_truth; ++g) { ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]]; } for (auto t = 0; t < num_iou_thresholds; ++t) { for (auto d = 0; d < num_detections; ++d) { // information about best match so far (match=-1 -> unmatched) double best_iou = std::min(iou_thresholds[t], 1 - 1e-10); int match = -1; for (auto g = 0; g < num_ground_truth; ++g) { // if this ground truth instance is already matched and not a // crowd, it cannot be matched to another detection if (ground_truth_matches[t * num_ground_truth + g] > 0 && !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) { continue; } // if detected instance matched to a regular ground truth // instance, we can break on the first ground truth instance // tagged as ignore (because they are sorted by the ignore tag) if (match >= 0 && !ground_truth_ignores[match] && ground_truth_ignores[g]) { break; } // if IOU overlap is the best so far, store the match appropriately if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) { best_iou = ious[d][ground_truth_sorted_indices[g]]; match = g; } } // if match was made, store id of match for both detection and // ground truth if (match >= 0) { detection_ignores[t * num_detections + d] = ground_truth_ignores[match]; detection_matches[t * num_detections + d] = ground_truth_instances[ground_truth_sorted_indices[match]].id; ground_truth_matches[t * num_ground_truth + match] = detection_instances[detection_sorted_indices[d]].id; } // set unmatched detections outside of area range to ignore const InstanceAnnotation& detection = detection_instances[detection_sorted_indices[d]]; detection_ignores[t * num_detections + d] = detection_ignores[t * num_detections + d] || (detection_matches[t * num_detections + d] == 0 && (detection.area < area_range[0] || detection.area > area_range[1])); } } // store detection score results results->detection_scores.resize(detection_sorted_indices.size()); for (size_t d = 0; d < detection_sorted_indices.size(); ++d) { results->detection_scores[d] = detection_instances[detection_sorted_indices[d]].score; } } std::vector EvaluateImages( const std::vector>& area_ranges, int max_detections, const std::vector& iou_thresholds, const ImageCategoryInstances>& image_category_ious, const ImageCategoryInstances& image_category_ground_truth_instances, const ImageCategoryInstances& image_category_detection_instances) { const int num_area_ranges = area_ranges.size(); const int num_images = image_category_ground_truth_instances.size(); const int num_categories = image_category_ious.size() > 0 ? image_category_ious[0].size() : 0; std::vector detection_sorted_indices; std::vector ground_truth_sorted_indices; std::vector ignores; std::vector results_all( num_images * num_area_ranges * num_categories); // Store results for each image, category, and area range combination. Results // for each IOU threshold are packed into the same ImageEvaluation object for (auto i = 0; i < num_images; ++i) { for (auto c = 0; c < num_categories; ++c) { const std::vector& ground_truth_instances = image_category_ground_truth_instances[i][c]; const std::vector& detection_instances = image_category_detection_instances[i][c]; SortInstancesByDetectionScore( detection_instances, &detection_sorted_indices); if ((int)detection_sorted_indices.size() > max_detections) { detection_sorted_indices.resize(max_detections); } for (size_t a = 0; a < area_ranges.size(); ++a) { SortInstancesByIgnore( area_ranges[a], ground_truth_instances, &ground_truth_sorted_indices, &ignores); MatchDetectionsToGroundTruth( detection_instances, detection_sorted_indices, ground_truth_instances, ground_truth_sorted_indices, ignores, image_category_ious[i][c], iou_thresholds, area_ranges[a], &results_all [c * num_area_ranges * num_images + a * num_images + i]); } } } return results_all; } // Convert a python list to a vector template std::vector list_to_vec(const py::list& l) { std::vector v(py::len(l)); for (int i = 0; i < (int)py::len(l); ++i) { v[i] = l[i].cast(); } return v; } // Helper function to Accumulate() // Considers the evaluation results applicable to a particular category, area // range, and max_detections parameter setting, which begin at // evaluations[evaluation_index]. Extracts a sorted list of length n of all // applicable detection instances concatenated across all images in the dataset, // which are represented by the outputs evaluation_indices, detection_scores, // image_detection_indices, and detection_sorted_indices--all of which are // length n. evaluation_indices[i] stores the applicable index into // evaluations[] for instance i, which has detection score detection_score[i], // and is the image_detection_indices[i]'th of the list of detections // for the image containing i. detection_sorted_indices[] defines a sorted // permutation of the 3 other outputs int BuildSortedDetectionList( const std::vector& evaluations, const int64_t evaluation_index, const int64_t num_images, const int max_detections, std::vector* evaluation_indices, std::vector* detection_scores, std::vector* detection_sorted_indices, std::vector* image_detection_indices) { assert(evaluations.size() >= evaluation_index + num_images); // Extract a list of object instances of the applicable category, area // range, and max detections requirements such that they can be sorted image_detection_indices->clear(); evaluation_indices->clear(); detection_scores->clear(); image_detection_indices->reserve(num_images * max_detections); evaluation_indices->reserve(num_images * max_detections); detection_scores->reserve(num_images * max_detections); int num_valid_ground_truth = 0; for (auto i = 0; i < num_images; ++i) { const ImageEvaluation& evaluation = evaluations[evaluation_index + i]; for (int d = 0; d < (int)evaluation.detection_scores.size() && d < max_detections; ++d) { // detected instances evaluation_indices->push_back(evaluation_index + i); image_detection_indices->push_back(d); detection_scores->push_back(evaluation.detection_scores[d]); } for (auto ground_truth_ignore : evaluation.ground_truth_ignores) { if (!ground_truth_ignore) { ++num_valid_ground_truth; } } } // Sort detections by decreasing score, using stable sort to match // python implementation detection_sorted_indices->resize(detection_scores->size()); std::iota( detection_sorted_indices->begin(), detection_sorted_indices->end(), 0); std::stable_sort( detection_sorted_indices->begin(), detection_sorted_indices->end(), [&detection_scores](size_t j1, size_t j2) { return (*detection_scores)[j1] > (*detection_scores)[j2]; }); return num_valid_ground_truth; } // Helper function to Accumulate() // Compute a precision recall curve given a sorted list of detected instances // encoded in evaluations, evaluation_indices, detection_scores, // detection_sorted_indices, image_detection_indices (see // BuildSortedDetectionList()). Using vectors precisions and recalls // and temporary storage, output the results into precisions_out, recalls_out, // and scores_out, which are large buffers containing many precion/recall curves // for all possible parameter settings, with precisions_out_index and // recalls_out_index defining the applicable indices to store results. void ComputePrecisionRecallCurve( const int64_t precisions_out_index, const int64_t precisions_out_stride, const int64_t recalls_out_index, const std::vector& recall_thresholds, const int iou_threshold_index, const int num_iou_thresholds, const int num_valid_ground_truth, const std::vector& evaluations, const std::vector& evaluation_indices, const std::vector& detection_scores, const std::vector& detection_sorted_indices, const std::vector& image_detection_indices, std::vector* precisions, std::vector* recalls, std::vector* precisions_out, std::vector* scores_out, std::vector* recalls_out) { assert(recalls_out->size() > recalls_out_index); // Compute precision/recall for each instance in the sorted list of detections int64_t true_positives_sum = 0, false_positives_sum = 0; precisions->clear(); recalls->clear(); precisions->reserve(detection_sorted_indices.size()); recalls->reserve(detection_sorted_indices.size()); assert(!evaluations.empty() || detection_sorted_indices.empty()); for (auto detection_sorted_index : detection_sorted_indices) { const ImageEvaluation& evaluation = evaluations[evaluation_indices[detection_sorted_index]]; const auto num_detections = evaluation.detection_matches.size() / num_iou_thresholds; const auto detection_index = iou_threshold_index * num_detections + image_detection_indices[detection_sorted_index]; assert(evaluation.detection_matches.size() > detection_index); assert(evaluation.detection_ignores.size() > detection_index); const int64_t detection_match = evaluation.detection_matches[detection_index]; const bool detection_ignores = evaluation.detection_ignores[detection_index]; const auto true_positive = detection_match > 0 && !detection_ignores; const auto false_positive = detection_match == 0 && !detection_ignores; if (true_positive) { ++true_positives_sum; } if (false_positive) { ++false_positives_sum; } const double recall = static_cast(true_positives_sum) / num_valid_ground_truth; recalls->push_back(recall); const int64_t num_valid_detections = true_positives_sum + false_positives_sum; const double precision = num_valid_detections > 0 ? static_cast(true_positives_sum) / num_valid_detections : 0.0; precisions->push_back(precision); } (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0; for (int64_t i = static_cast(precisions->size()) - 1; i > 0; --i) { if ((*precisions)[i] > (*precisions)[i - 1]) { (*precisions)[i - 1] = (*precisions)[i]; } } // Sample the per instance precision/recall list at each recall threshold for (size_t r = 0; r < recall_thresholds.size(); ++r) { // first index in recalls >= recall_thresholds[r] std::vector::iterator low = std::lower_bound( recalls->begin(), recalls->end(), recall_thresholds[r]); size_t precisions_index = low - recalls->begin(); const auto results_ind = precisions_out_index + r * precisions_out_stride; assert(results_ind < precisions_out->size()); assert(results_ind < scores_out->size()); if (precisions_index < precisions->size()) { (*precisions_out)[results_ind] = (*precisions)[precisions_index]; (*scores_out)[results_ind] = detection_scores[detection_sorted_indices[precisions_index]]; } else { (*precisions_out)[results_ind] = 0; (*scores_out)[results_ind] = 0; } } } py::dict Accumulate( const py::object& params, const std::vector& evaluations) { const std::vector recall_thresholds = list_to_vec(params.attr("recThrs")); const std::vector max_detections = list_to_vec(params.attr("maxDets")); const int num_iou_thresholds = py::len(params.attr("iouThrs")); const int num_recall_thresholds = py::len(params.attr("recThrs")); const int num_categories = params.attr("useCats").cast() == 1 ? py::len(params.attr("catIds")) : 1; const int num_area_ranges = py::len(params.attr("areaRng")); const int num_max_detections = py::len(params.attr("maxDets")); const int num_images = py::len(params.attr("imgIds")); std::vector precisions_out( num_iou_thresholds * num_recall_thresholds * num_categories * num_area_ranges * num_max_detections, -1); std::vector recalls_out( num_iou_thresholds * num_categories * num_area_ranges * num_max_detections, -1); std::vector scores_out( num_iou_thresholds * num_recall_thresholds * num_categories * num_area_ranges * num_max_detections, -1); // Consider the list of all detected instances in the entire dataset in one // large list. evaluation_indices, detection_scores, // image_detection_indices, and detection_sorted_indices all have the same // length as this list, such that each entry corresponds to one detected // instance std::vector evaluation_indices; // indices into evaluations[] std::vector detection_scores; // detection scores of each instance std::vector detection_sorted_indices; // sorted indices of all // instances in the dataset std::vector image_detection_indices; // indices into the list of detected instances in // the same image as each instance std::vector precisions, recalls; for (auto c = 0; c < num_categories; ++c) { for (auto a = 0; a < num_area_ranges; ++a) { for (auto m = 0; m < num_max_detections; ++m) { // The COCO PythonAPI assumes evaluations[] (the return value of // COCOeval::EvaluateImages() is one long list storing results for each // combination of category, area range, and image id, with categories in // the outermost loop and images in the innermost loop. const int64_t evaluations_index = c * num_area_ranges * num_images + a * num_images; int num_valid_ground_truth = BuildSortedDetectionList( evaluations, evaluations_index, num_images, max_detections[m], &evaluation_indices, &detection_scores, &detection_sorted_indices, &image_detection_indices); if (num_valid_ground_truth == 0) { continue; } for (auto t = 0; t < num_iou_thresholds; ++t) { // recalls_out is a flattened vectors representing a // num_iou_thresholds X num_categories X num_area_ranges X // num_max_detections matrix const int64_t recalls_out_index = t * num_categories * num_area_ranges * num_max_detections + c * num_area_ranges * num_max_detections + a * num_max_detections + m; // precisions_out and scores_out are flattened vectors // representing a num_iou_thresholds X num_recall_thresholds X // num_categories X num_area_ranges X num_max_detections matrix const int64_t precisions_out_stride = num_categories * num_area_ranges * num_max_detections; const int64_t precisions_out_index = t * num_recall_thresholds * num_categories * num_area_ranges * num_max_detections + c * num_area_ranges * num_max_detections + a * num_max_detections + m; ComputePrecisionRecallCurve( precisions_out_index, precisions_out_stride, recalls_out_index, recall_thresholds, t, num_iou_thresholds, num_valid_ground_truth, evaluations, evaluation_indices, detection_scores, detection_sorted_indices, image_detection_indices, &precisions, &recalls, &precisions_out, &scores_out, &recalls_out); } } } } time_t rawtime; struct tm local_time; std::array buffer; time(&rawtime); #ifdef _WIN32 localtime_s(&local_time, &rawtime); #else localtime_r(&rawtime, &local_time); #endif strftime( buffer.data(), 200, "%Y-%m-%d %H:%num_max_detections:%S", &local_time); return py::dict( "params"_a = params, "counts"_a = std::vector({num_iou_thresholds, num_recall_thresholds, num_categories, num_area_ranges, num_max_detections}), "date"_a = buffer, "precision"_a = precisions_out, "recall"_a = recalls_out, "scores"_a = scores_out); } } // namespace COCOeval ================================================ FILE: yolox/layers/csrc/cocoeval/cocoeval.h ================================================ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved #pragma once #include #include #include #include #include namespace py = pybind11; namespace COCOeval { // Annotation data for a single object instance in an image struct InstanceAnnotation { InstanceAnnotation( uint64_t id, double score, double area, bool is_crowd, bool ignore) : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {} uint64_t id; double score = 0.; double area = 0.; bool is_crowd = false; bool ignore = false; }; // Stores intermediate results for evaluating detection results for a single // image that has D detected instances and G ground truth instances. This stores // matches between detected and ground truth instances struct ImageEvaluation { // For each of the D detected instances, the id of the matched ground truth // instance, or 0 if unmatched std::vector detection_matches; // The detection score of each of the D detected instances std::vector detection_scores; // Marks whether or not each of G instances was ignored from evaluation (e.g., // because it's outside area_range) std::vector ground_truth_ignores; // Marks whether or not each of D instances was ignored from evaluation (e.g., // because it's outside aRng) std::vector detection_ignores; }; template using ImageCategoryInstances = std::vector>>; // C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg(). For each // combination of image, category, area range settings, and IOU thresholds to // evaluate, it matches detected instances to ground truth instances and stores // the results into a vector of ImageEvaluation results, which will be // interpreted by the COCOeval::Accumulate() function to produce precion-recall // curves. The parameters of nested vectors have the following semantics: // image_category_ious[i][c][d][g] is the intersection over union of the d'th // detected instance and g'th ground truth instance of // category category_ids[c] in image image_ids[i] // image_category_ground_truth_instances[i][c] is a vector of ground truth // instances in image image_ids[i] of category category_ids[c] // image_category_detection_instances[i][c] is a vector of detected // instances in image image_ids[i] of category category_ids[c] std::vector EvaluateImages( const std::vector>& area_ranges, // vector of 2-tuples int max_detections, const std::vector& iou_thresholds, const ImageCategoryInstances>& image_category_ious, const ImageCategoryInstances& image_category_ground_truth_instances, const ImageCategoryInstances& image_category_detection_instances); // C++ implementation of COCOeval.accumulate(), which generates precision // recall curves for each set of category, IOU threshold, detection area range, // and max number of detections parameters. It is assumed that the parameter // evaluations is the return value of the functon COCOeval::EvaluateImages(), // which was called with the same parameter settings params py::dict Accumulate( const py::object& params, const std::vector& evalutations); } // namespace COCOeval ================================================ FILE: yolox/layers/csrc/vision.cpp ================================================ #include "cocoeval/cocoeval.h" PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("COCOevalAccumulate", &COCOeval::Accumulate, "COCOeval::Accumulate"); m.def( "COCOevalEvaluateImages", &COCOeval::EvaluateImages, "COCOeval::EvaluateImages"); pybind11::class_(m, "InstanceAnnotation") .def(pybind11::init()); pybind11::class_(m, "ImageEvaluation") .def(pybind11::init<>()); } ================================================ FILE: yolox/layers/fast_coco_eval_api.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # This file comes from # https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import numpy as np from pycocotools.cocoeval import COCOeval # import torch first to make yolox._C work without ImportError of libc10.so # in YOLOX, env is already set in __init__.py. from yolox import _C import copy import time class COCOeval_opt(COCOeval): """ This is a slightly modified version of the original COCO API, where the functions evaluateImg() and accumulate() are implemented in C++ to speedup evaluation """ def evaluate(self): """ Run per image evaluation on given images and store results in self.evalImgs_cpp, a datastructure that isn't readable from Python but is used by a c++ implementation of accumulate(). Unlike the original COCO PythonAPI, we don't populate the datastructure self.evalImgs because this datastructure is a computational bottleneck. :return: None """ tic = time.time() print("Running per image evaluation...") p = self.params # add backward compatibility if useSegm is specified in params if p.useSegm is not None: p.iouType = "segm" if p.useSegm == 1 else "bbox" print( "useSegm (deprecated) is not None. Running {} evaluation".format( p.iouType ) ) print("Evaluate annotation type *{}*".format(p.iouType)) p.imgIds = list(np.unique(p.imgIds)) if p.useCats: p.catIds = list(np.unique(p.catIds)) p.maxDets = sorted(p.maxDets) self.params = p self._prepare() # loop through images, area range, max detection number catIds = p.catIds if p.useCats else [-1] if p.iouType == "segm" or p.iouType == "bbox": computeIoU = self.computeIoU elif p.iouType == "keypoints": computeIoU = self.computeOks self.ious = { (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds } maxDet = p.maxDets[-1] # <<<< Beginning of code differences with original COCO API def convert_instances_to_cpp(instances, is_det=False): # Convert annotations for a list of instances in an image to a format that's fast # to access in C++ instances_cpp = [] for instance in instances: instance_cpp = _C.InstanceAnnotation( int(instance["id"]), instance["score"] if is_det else instance.get("score", 0.0), instance["area"], bool(instance.get("iscrowd", 0)), bool(instance.get("ignore", 0)), ) instances_cpp.append(instance_cpp) return instances_cpp # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++ ground_truth_instances = [ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds] for imgId in p.imgIds ] detected_instances = [ [ convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds ] for imgId in p.imgIds ] ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds] if not p.useCats: # For each image, flatten per-category lists into a single list ground_truth_instances = [ [[o for c in i for o in c]] for i in ground_truth_instances ] detected_instances = [ [[o for c in i for o in c]] for i in detected_instances ] # Call C++ implementation of self.evaluateImgs() self._evalImgs_cpp = _C.COCOevalEvaluateImages( p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances, ) self._evalImgs = None self._paramsEval = copy.deepcopy(self.params) toc = time.time() print("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic)) # >>>> End of code differences with original COCO API def accumulate(self): """ Accumulate per image evaluation results and store the result in self.eval. Does not support changing parameter settings from those used by self.evaluate() """ print("Accumulating evaluation results...") tic = time.time() if not hasattr(self, "_evalImgs_cpp"): print("Please run evaluate() first") self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp) # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections self.eval["recall"] = np.array(self.eval["recall"]).reshape( self.eval["counts"][:1] + self.eval["counts"][2:] ) # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X # num_area_ranges X num_max_detections self.eval["precision"] = np.array(self.eval["precision"]).reshape( self.eval["counts"] ) self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"]) toc = time.time() print( "COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic) ) ================================================ FILE: yolox/models/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from .darknet import CSPDarknet, Darknet from .losses import IOUloss from .yolo_fpn import YOLOFPN from .yolo_head import YOLOXHead from .yolo_pafpn import YOLOPAFPN from .yolox import YOLOX ================================================ FILE: yolox/models/darknet.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from torch import nn from .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck class Darknet(nn.Module): # number of blocks from dark2 to dark5. depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]} def __init__( self, depth, in_channels=3, stem_out_channels=32, out_features=("dark3", "dark4", "dark5"), ): """ Args: depth (int): depth of darknet used in model, usually use [21, 53] for this param. in_channels (int): number of input channels, for example, use 3 for RGB image. stem_out_channels (int): number of output chanels of darknet stem. It decides channels of darknet layer2 to layer5. out_features (Tuple[str]): desired output layer name. """ super().__init__() assert out_features, "please provide output features of Darknet" self.out_features = out_features self.stem = nn.Sequential( BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act="lrelu"), *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2), ) in_channels = stem_out_channels * 2 # 64 num_blocks = Darknet.depth2blocks[depth] # create darknet with `stem_out_channels` and `num_blocks` layers. # to make model structure more clear, we don't use `for` statement in python. self.dark2 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[0], stride=2) ) in_channels *= 2 # 128 self.dark3 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[1], stride=2) ) in_channels *= 2 # 256 self.dark4 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[2], stride=2) ) in_channels *= 2 # 512 self.dark5 = nn.Sequential( *self.make_group_layer(in_channels, num_blocks[3], stride=2), *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2), ) # self.diffusion_freeze() def diffusion_freeze(self): for v in self.stem.parameters(): v.requires_grad=False for v in self.dark2.parameters(): v.requires_grad=False def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1): "starts with conv layer then has `num_blocks` `ResLayer`" return [ BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act="lrelu"), *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)], ] def make_spp_block(self, filters_list, in_filters): m = nn.Sequential( *[ BaseConv(in_filters, filters_list[0], 1, stride=1, act="lrelu"), BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), SPPBottleneck( in_channels=filters_list[1], out_channels=filters_list[0], activation="lrelu", ), BaseConv(filters_list[0], filters_list[1], 3, stride=1, act="lrelu"), BaseConv(filters_list[1], filters_list[0], 1, stride=1, act="lrelu"), ] ) return m def forward(self, x): outputs = {} x = self.stem(x) outputs["stem"] = x x = self.dark2(x) outputs["dark2"] = x x = self.dark3(x) outputs["dark3"] = x x = self.dark4(x) outputs["dark4"] = x x = self.dark5(x) outputs["dark5"] = x return {k: v for k, v in outputs.items() if k in self.out_features} class CSPDarknet(nn.Module): def __init__( self, dep_mul, wid_mul, out_features=("dark3", "dark4", "dark5"), depthwise=False, act="silu", ): super().__init__() assert out_features, "please provide output features of Darknet" self.out_features = out_features Conv = DWConv if depthwise else BaseConv base_channels = int(wid_mul * 64) # 64 base_depth = max(round(dep_mul * 3), 1) # 3 # stem self.stem = Focus(3, base_channels, ksize=3, act=act) # dark2 self.dark2 = nn.Sequential( Conv(base_channels, base_channels * 2, 3, 2, act=act), CSPLayer( base_channels * 2, base_channels * 2, n=base_depth, depthwise=depthwise, act=act, ), ) # dark3 self.dark3 = nn.Sequential( Conv(base_channels * 2, base_channels * 4, 3, 2, act=act), CSPLayer( base_channels * 4, base_channels * 4, n=base_depth * 3, depthwise=depthwise, act=act, ), ) # dark4 self.dark4 = nn.Sequential( Conv(base_channels * 4, base_channels * 8, 3, 2, act=act), CSPLayer( base_channels * 8, base_channels * 8, n=base_depth * 3, depthwise=depthwise, act=act, ), ) # dark5 self.dark5 = nn.Sequential( Conv(base_channels * 8, base_channels * 16, 3, 2, act=act), SPPBottleneck(base_channels * 16, base_channels * 16, activation=act), CSPLayer( base_channels * 16, base_channels * 16, n=base_depth, shortcut=False, depthwise=depthwise, act=act, ), ) def forward(self, x): outputs = {} x = self.stem(x) outputs["stem"] = x x = self.dark2(x) outputs["dark2"] = x x = self.dark3(x) outputs["dark3"] = x x = self.dark4(x) outputs["dark4"] = x x = self.dark5(x) outputs["dark5"] = x return {k: v for k, v in outputs.items() if k in self.out_features} ================================================ FILE: yolox/models/losses.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn import torch.nn.functional as F class IOUloss(nn.Module): def __init__(self, reduction="none", loss_type="iou"): super(IOUloss, self).__init__() self.reduction = reduction self.loss_type = loss_type def forward(self, pred, target): assert pred.shape[0] == target.shape[0] pred = pred.view(-1, 4) target = target.view(-1, 4) tl = torch.max( (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2) ) br = torch.min( (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2) ) area_p = torch.prod(pred[:, 2:], 1) area_g = torch.prod(target[:, 2:], 1) en = (tl < br).type(tl.type()).prod(dim=1) area_i = torch.prod(br - tl, 1) * en iou = (area_i) / (area_p + area_g - area_i + 1e-16) if self.loss_type == "iou": loss = 1 - iou ** 2 elif self.loss_type == "giou": c_tl = torch.min( (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2) ) c_br = torch.max( (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2) ) area_c = torch.prod(c_br - c_tl, 1) giou = iou - (area_c - area_i) / area_c.clamp(1e-16) loss = 1 - giou.clamp(min=-1.0, max=1.0) if self.reduction == "mean": loss = loss.mean() elif self.reduction == "sum": loss = loss.sum() return loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). alpha: (optional) Weighting factor in range (0,1) to balance positive vs negative examples. Default = -1 (no weighting). gamma: Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss #return loss.mean(0).sum() / num_boxes return loss.sum() / num_boxes ================================================ FILE: yolox/models/network_blocks.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn class SiLU(nn.Module): """export-friendly version of nn.SiLU()""" @staticmethod def forward(x): return x * torch.sigmoid(x) def get_activation(name="silu", inplace=True): if name == "silu": module = nn.SiLU(inplace=inplace) elif name == "relu": module = nn.ReLU(inplace=inplace) elif name == "lrelu": module = nn.LeakyReLU(0.1, inplace=inplace) else: raise AttributeError("Unsupported act type: {}".format(name)) return module class BaseConv(nn.Module): """A Conv2d -> Batchnorm -> silu/leaky relu block""" def __init__( self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act="silu" ): super().__init__() # same padding pad = (ksize - 1) // 2 self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=ksize, stride=stride, padding=pad, groups=groups, bias=bias, ) self.bn = nn.BatchNorm2d(out_channels) self.act = get_activation(act, inplace=True) def forward(self, x): return self.act(self.bn(self.conv(x))) def fuseforward(self, x): return self.act(self.conv(x)) class DWConv(nn.Module): """Depthwise Conv + Conv""" def __init__(self, in_channels, out_channels, ksize, stride=1, act="silu"): super().__init__() self.dconv = BaseConv( in_channels, in_channels, ksize=ksize, stride=stride, groups=in_channels, act=act, ) self.pconv = BaseConv( in_channels, out_channels, ksize=1, stride=1, groups=1, act=act ) def forward(self, x): x = self.dconv(x) return self.pconv(x) class Bottleneck(nn.Module): # Standard bottleneck def __init__( self, in_channels, out_channels, shortcut=True, expansion=0.5, depthwise=False, act="silu", ): super().__init__() hidden_channels = int(out_channels * expansion) Conv = DWConv if depthwise else BaseConv self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act) self.use_add = shortcut and in_channels == out_channels def forward(self, x): y = self.conv2(self.conv1(x)) if self.use_add: y = y + x return y class ResLayer(nn.Module): "Residual layer with `in_channels` inputs." def __init__(self, in_channels: int): super().__init__() mid_channels = in_channels // 2 self.layer1 = BaseConv( in_channels, mid_channels, ksize=1, stride=1, act="lrelu" ) self.layer2 = BaseConv( mid_channels, in_channels, ksize=3, stride=1, act="lrelu" ) def forward(self, x): out = self.layer2(self.layer1(x)) return x + out class SPPBottleneck(nn.Module): """Spatial pyramid pooling layer used in YOLOv3-SPP""" def __init__( self, in_channels, out_channels, kernel_sizes=(5, 9, 13), activation="silu" ): super().__init__() hidden_channels = in_channels // 2 self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation) self.m = nn.ModuleList( [ nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2) for ks in kernel_sizes ] ) conv2_channels = hidden_channels * (len(kernel_sizes) + 1) self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation) def forward(self, x): x = self.conv1(x) x = torch.cat([x] + [m(x) for m in self.m], dim=1) x = self.conv2(x) return x class CSPLayer(nn.Module): """C3 in yolov5, CSP Bottleneck with 3 convolutions""" def __init__( self, in_channels, out_channels, n=1, shortcut=True, expansion=0.5, depthwise=False, act="silu", ): """ Args: in_channels (int): input channels. out_channels (int): output channels. n (int): number of Bottlenecks. Default value: 1. """ # ch_in, ch_out, number, shortcut, groups, expansion super().__init__() hidden_channels = int(out_channels * expansion) # hidden channels self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act) self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act) module_list = [ Bottleneck( hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act ) for _ in range(n) ] self.m = nn.Sequential(*module_list) def forward(self, x): x_1 = self.conv1(x) x_2 = self.conv2(x) x_1 = self.m(x_1) x = torch.cat((x_1, x_2), dim=1) return self.conv3(x) class Focus(nn.Module): """Focus width and height information into channel space.""" def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="silu"): super().__init__() self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act) def forward(self, x): # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2) patch_top_left = x[..., ::2, ::2] patch_top_right = x[..., ::2, 1::2] patch_bot_left = x[..., 1::2, ::2] patch_bot_right = x[..., 1::2, 1::2] x = torch.cat( ( patch_top_left, patch_bot_left, patch_top_right, patch_bot_right, ), dim=1, ) return self.conv(x) ================================================ FILE: yolox/models/yolo_fpn.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn from .darknet import Darknet from .network_blocks import BaseConv class YOLOFPN(nn.Module): """ YOLOFPN module. Darknet 53 is the default backbone of this model. """ def __init__( self, depth=53, in_features=["dark3", "dark4", "dark5"], ): super().__init__() self.backbone = Darknet(depth) self.in_features = in_features # out 1 self.out1_cbl = self._make_cbl(512, 256, 1) self.out1 = self._make_embedding([256, 512], 512 + 256) # out 2 self.out2_cbl = self._make_cbl(256, 128, 1) self.out2 = self._make_embedding([128, 256], 256 + 128) # upsample self.upsample = nn.Upsample(scale_factor=2, mode="nearest") def _make_cbl(self, _in, _out, ks): return BaseConv(_in, _out, ks, stride=1, act="lrelu") def _make_embedding(self, filters_list, in_filters): m = nn.Sequential( *[ self._make_cbl(in_filters, filters_list[0], 1), self._make_cbl(filters_list[0], filters_list[1], 3), self._make_cbl(filters_list[1], filters_list[0], 1), self._make_cbl(filters_list[0], filters_list[1], 3), self._make_cbl(filters_list[1], filters_list[0], 1), ] ) return m def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"): with open(filename, "rb") as f: state_dict = torch.load(f, map_location="cpu") print("loading pretrained weights...") self.backbone.load_state_dict(state_dict) def forward(self, inputs): """ Args: inputs (Tensor): input image. Returns: Tuple[Tensor]: FPN output features.. """ # backbone out_features = self.backbone(inputs) x2, x1, x0 = [out_features[f] for f in self.in_features] # yolo branch 1 x1_in = self.out1_cbl(x0) x1_in = self.upsample(x1_in) x1_in = torch.cat([x1_in, x1], 1) out_dark4 = self.out1(x1_in) # yolo branch 2 x2_in = self.out2_cbl(out_dark4) x2_in = self.upsample(x2_in) x2_in = torch.cat([x2_in, x2], 1) out_dark3 = self.out2(x2_in) outputs = (out_dark3, out_dark4, x0) return outputs ================================================ FILE: yolox/models/yolo_head.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from loguru import logger import torch import torch.nn as nn import torch.nn.functional as F from yolox.utils import bboxes_iou import math from .losses import IOUloss from .network_blocks import BaseConv, DWConv class YOLOXHead(nn.Module): def __init__( self, num_classes, width=1.0, strides=[8, 16, 32], in_channels=[256, 512, 1024], act="silu", depthwise=False, ): """ Args: act (str): activation type of conv. Defalut value: "silu". depthwise (bool): wheather apply depthwise conv in conv branch. Defalut value: False. """ super().__init__() self.n_anchors = 1 self.num_classes = num_classes self.decode_in_inference = True # for deploy, set to False self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.cls_preds = nn.ModuleList() self.reg_preds = nn.ModuleList() self.obj_preds = nn.ModuleList() self.stems = nn.ModuleList() Conv = DWConv if depthwise else BaseConv for i in range(len(in_channels)): self.stems.append( BaseConv( in_channels=int(in_channels[i] * width), out_channels=int(256 * width), ksize=1, stride=1, act=act, ) ) self.cls_convs.append( nn.Sequential( *[ Conv( in_channels=int(256 * width), out_channels=int(256 * width), ksize=3, stride=1, act=act, ), Conv( in_channels=int(256 * width), out_channels=int(256 * width), ksize=3, stride=1, act=act, ), ] ) ) self.reg_convs.append( nn.Sequential( *[ Conv( in_channels=int(256 * width), out_channels=int(256 * width), ksize=3, stride=1, act=act, ), Conv( in_channels=int(256 * width), out_channels=int(256 * width), ksize=3, stride=1, act=act, ), ] ) ) self.cls_preds.append( nn.Conv2d( in_channels=int(256 * width), out_channels=self.n_anchors * self.num_classes, kernel_size=1, stride=1, padding=0, ) ) self.reg_preds.append( nn.Conv2d( in_channels=int(256 * width), out_channels=4, kernel_size=1, stride=1, padding=0, ) ) self.obj_preds.append( nn.Conv2d( in_channels=int(256 * width), out_channels=self.n_anchors * 1, kernel_size=1, stride=1, padding=0, ) ) self.use_l1 = False self.l1_loss = nn.L1Loss(reduction="none") self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction="none") self.iou_loss = IOUloss(reduction="none") self.strides = strides self.grids = [torch.zeros(1)] * len(in_channels) self.expanded_strides = [None] * len(in_channels) def initialize_biases(self, prior_prob): for conv in self.cls_preds: b = conv.bias.view(self.n_anchors, -1) b.data.fill_(-math.log((1 - prior_prob) / prior_prob)) conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) for conv in self.obj_preds: b = conv.bias.view(self.n_anchors, -1) b.data.fill_(-math.log((1 - prior_prob) / prior_prob)) conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) def forward(self, xin, labels=None, imgs=None): outputs = [] origin_preds = [] x_shifts = [] y_shifts = [] expanded_strides = [] for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate( zip(self.cls_convs, self.reg_convs, self.strides, xin) ): x = self.stems[k](x) cls_x = x reg_x = x cls_feat = cls_conv(cls_x) cls_output = self.cls_preds[k](cls_feat) reg_feat = reg_conv(reg_x) reg_output = self.reg_preds[k](reg_feat) obj_output = self.obj_preds[k](reg_feat) if self.training: output = torch.cat([reg_output, obj_output, cls_output], 1) output, grid = self.get_output_and_grid( output, k, stride_this_level, xin[0].type() ) x_shifts.append(grid[:, :, 0]) y_shifts.append(grid[:, :, 1]) expanded_strides.append( torch.zeros(1, grid.shape[1]) .fill_(stride_this_level) .type_as(xin[0]) ) if self.use_l1: batch_size = reg_output.shape[0] hsize, wsize = reg_output.shape[-2:] reg_output = reg_output.view( batch_size, self.n_anchors, 4, hsize, wsize ) reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape( batch_size, -1, 4 ) origin_preds.append(reg_output.clone()) else: output = torch.cat( [reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1 ) outputs.append(output) if self.training: return self.get_losses( imgs, x_shifts, y_shifts, expanded_strides, labels, torch.cat(outputs, 1), origin_preds, dtype=xin[0].dtype, ) else: self.hw = [x.shape[-2:] for x in outputs] # [batch, n_anchors_all, 85] outputs = torch.cat( [x.flatten(start_dim=2) for x in outputs], dim=2 ).permute(0, 2, 1) if self.decode_in_inference: return self.decode_outputs(outputs, dtype=xin[0].type()) else: return outputs def get_output_and_grid(self, output, k, stride, dtype): grid = self.grids[k] batch_size = output.shape[0] n_ch = 5 + self.num_classes hsize, wsize = output.shape[-2:] if grid.shape[2:4] != output.shape[2:4]: yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)]) grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype) self.grids[k] = grid output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize) output = output.permute(0, 1, 3, 4, 2).reshape( batch_size, self.n_anchors * hsize * wsize, -1 ) grid = grid.view(1, -1, 2) output[..., :2] = (output[..., :2] + grid) * stride output[..., 2:4] = torch.exp(output[..., 2:4]) * stride return output, grid def decode_outputs(self, outputs, dtype): grids = [] strides = [] for (hsize, wsize), stride in zip(self.hw, self.strides): yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)]) grid = torch.stack((xv, yv), 2).view(1, -1, 2) grids.append(grid) shape = grid.shape[:2] strides.append(torch.full((*shape, 1), stride)) grids = torch.cat(grids, dim=1).type(dtype) strides = torch.cat(strides, dim=1).type(dtype) outputs[..., :2] = (outputs[..., :2] + grids) * strides outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides return outputs def get_losses( self, imgs, x_shifts, y_shifts, expanded_strides, labels, outputs, origin_preds, dtype, ): bbox_preds = outputs[:, :, :4] # [batch, n_anchors_all, 4] obj_preds = outputs[:, :, 4].unsqueeze(-1) # [batch, n_anchors_all, 1] cls_preds = outputs[:, :, 5:] # [batch, n_anchors_all, n_cls] # calculate targets mixup = labels.shape[2] > 5 if mixup: label_cut = labels[..., :5] else: label_cut = labels nlabel = (label_cut.sum(dim=2) > 0).sum(dim=1) # number of objects total_num_anchors = outputs.shape[1] x_shifts = torch.cat(x_shifts, 1) # [1, n_anchors_all] y_shifts = torch.cat(y_shifts, 1) # [1, n_anchors_all] expanded_strides = torch.cat(expanded_strides, 1) if self.use_l1: origin_preds = torch.cat(origin_preds, 1) cls_targets = [] reg_targets = [] l1_targets = [] obj_targets = [] fg_masks = [] num_fg = 0.0 num_gts = 0.0 for batch_idx in range(outputs.shape[0]): num_gt = int(nlabel[batch_idx]) num_gts += num_gt if num_gt == 0: cls_target = outputs.new_zeros((0, self.num_classes)) reg_target = outputs.new_zeros((0, 4)) l1_target = outputs.new_zeros((0, 4)) obj_target = outputs.new_zeros((total_num_anchors, 1)) fg_mask = outputs.new_zeros(total_num_anchors).bool() else: gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5] gt_classes = labels[batch_idx, :num_gt, 0] bboxes_preds_per_image = bbox_preds[batch_idx] try: ( gt_matched_classes, fg_mask, pred_ious_this_matching, matched_gt_inds, num_fg_img, ) = self.get_assignments( # noqa batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes, bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts, cls_preds, bbox_preds, obj_preds, labels, imgs, ) except RuntimeError: logger.info( "OOM RuntimeError is raised due to the huge memory cost during label assignment. \ CPU mode is applied in this batch. If you want to avoid this issue, \ try to reduce the batch size or image size." ) print("OOM RuntimeError is raised due to the huge memory cost during label assignment. \ CPU mode is applied in this batch. If you want to avoid this issue, \ try to reduce the batch size or image size.") torch.cuda.empty_cache() ( gt_matched_classes, fg_mask, pred_ious_this_matching, matched_gt_inds, num_fg_img, ) = self.get_assignments( # noqa batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes, bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts, cls_preds, bbox_preds, obj_preds, labels, imgs, "cpu", ) torch.cuda.empty_cache() num_fg += num_fg_img cls_target = F.one_hot( gt_matched_classes.to(torch.int64), self.num_classes ) * pred_ious_this_matching.unsqueeze(-1) obj_target = fg_mask.unsqueeze(-1) reg_target = gt_bboxes_per_image[matched_gt_inds] if self.use_l1: l1_target = self.get_l1_target( outputs.new_zeros((num_fg_img, 4)), gt_bboxes_per_image[matched_gt_inds], expanded_strides[0][fg_mask], x_shifts=x_shifts[0][fg_mask], y_shifts=y_shifts[0][fg_mask], ) cls_targets.append(cls_target) reg_targets.append(reg_target) obj_targets.append(obj_target.to(dtype)) fg_masks.append(fg_mask) if self.use_l1: l1_targets.append(l1_target) cls_targets = torch.cat(cls_targets, 0) reg_targets = torch.cat(reg_targets, 0) obj_targets = torch.cat(obj_targets, 0) fg_masks = torch.cat(fg_masks, 0) if self.use_l1: l1_targets = torch.cat(l1_targets, 0) num_fg = max(num_fg, 1) loss_iou = ( self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets) ).sum() / num_fg loss_obj = ( self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets) ).sum() / num_fg loss_cls = ( self.bcewithlog_loss( cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets ) ).sum() / num_fg if self.use_l1: loss_l1 = ( self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets) ).sum() / num_fg else: loss_l1 = 0.0 reg_weight = 5.0 loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1 return ( loss, reg_weight * loss_iou, loss_obj, loss_cls, loss_l1, num_fg / max(num_gts, 1), ) def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8): l1_target[:, 0] = gt[:, 0] / stride - x_shifts l1_target[:, 1] = gt[:, 1] / stride - y_shifts l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps) l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps) return l1_target @torch.no_grad() def get_assignments( self, batch_idx, num_gt, total_num_anchors, gt_bboxes_per_image, gt_classes, bboxes_preds_per_image, expanded_strides, x_shifts, y_shifts, cls_preds, bbox_preds, obj_preds, labels, imgs, mode="gpu", ): if mode == "cpu": print("------------CPU Mode for This Batch-------------") gt_bboxes_per_image = gt_bboxes_per_image.cpu().float() bboxes_preds_per_image = bboxes_preds_per_image.cpu().float() gt_classes = gt_classes.cpu().float() expanded_strides = expanded_strides.cpu().float() x_shifts = x_shifts.cpu() y_shifts = y_shifts.cpu() img_size = imgs.shape[2:] fg_mask, is_in_boxes_and_center = self.get_in_boxes_info( gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt, img_size ) bboxes_preds_per_image = bboxes_preds_per_image[fg_mask] cls_preds_ = cls_preds[batch_idx][fg_mask] obj_preds_ = obj_preds[batch_idx][fg_mask] num_in_boxes_anchor = bboxes_preds_per_image.shape[0] if mode == "cpu": gt_bboxes_per_image = gt_bboxes_per_image.cpu() bboxes_preds_per_image = bboxes_preds_per_image.cpu() pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False) gt_cls_per_image = ( F.one_hot(gt_classes.to(torch.int64), self.num_classes) .float() .unsqueeze(1) .repeat(1, num_in_boxes_anchor, 1) ) pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8) if mode == "cpu": cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu() with torch.cuda.amp.autocast(enabled=False): cls_preds_ = ( cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() * obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() ) pair_wise_cls_loss = F.binary_cross_entropy( cls_preds_.sqrt_(), gt_cls_per_image, reduction="none" ).sum(-1) del cls_preds_ cost = ( pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 * (~is_in_boxes_and_center) ) ( num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds, ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask) del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss if mode == "cpu": gt_matched_classes = gt_matched_classes.cuda() fg_mask = fg_mask.cuda() pred_ious_this_matching = pred_ious_this_matching.cuda() matched_gt_inds = matched_gt_inds.cuda() return ( gt_matched_classes, fg_mask, pred_ious_this_matching, matched_gt_inds, num_fg, ) def get_in_boxes_info( self, gt_bboxes_per_image, expanded_strides, x_shifts, y_shifts, total_num_anchors, num_gt, img_size ): expanded_strides_per_image = expanded_strides[0] x_shifts_per_image = x_shifts[0] * expanded_strides_per_image y_shifts_per_image = y_shifts[0] * expanded_strides_per_image x_centers_per_image = ( (x_shifts_per_image + 0.5 * expanded_strides_per_image) .unsqueeze(0) .repeat(num_gt, 1) ) # [n_anchor] -> [n_gt, n_anchor] y_centers_per_image = ( (y_shifts_per_image + 0.5 * expanded_strides_per_image) .unsqueeze(0) .repeat(num_gt, 1) ) gt_bboxes_per_image_l = ( (gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2]) .unsqueeze(1) .repeat(1, total_num_anchors) ) gt_bboxes_per_image_r = ( (gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2]) .unsqueeze(1) .repeat(1, total_num_anchors) ) gt_bboxes_per_image_t = ( (gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3]) .unsqueeze(1) .repeat(1, total_num_anchors) ) gt_bboxes_per_image_b = ( (gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3]) .unsqueeze(1) .repeat(1, total_num_anchors) ) b_l = x_centers_per_image - gt_bboxes_per_image_l b_r = gt_bboxes_per_image_r - x_centers_per_image b_t = y_centers_per_image - gt_bboxes_per_image_t b_b = gt_bboxes_per_image_b - y_centers_per_image bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2) is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0 is_in_boxes_all = is_in_boxes.sum(dim=0) > 0 # in fixed center center_radius = 2.5 # clip center inside image gt_bboxes_per_image_clip = gt_bboxes_per_image[:, 0:2].clone() gt_bboxes_per_image_clip[:, 0] = torch.clamp(gt_bboxes_per_image_clip[:, 0], min=0, max=img_size[1]) gt_bboxes_per_image_clip[:, 1] = torch.clamp(gt_bboxes_per_image_clip[:, 1], min=0, max=img_size[0]) gt_bboxes_per_image_l = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat( 1, total_num_anchors ) - center_radius * expanded_strides_per_image.unsqueeze(0) gt_bboxes_per_image_r = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat( 1, total_num_anchors ) + center_radius * expanded_strides_per_image.unsqueeze(0) gt_bboxes_per_image_t = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat( 1, total_num_anchors ) - center_radius * expanded_strides_per_image.unsqueeze(0) gt_bboxes_per_image_b = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat( 1, total_num_anchors ) + center_radius * expanded_strides_per_image.unsqueeze(0) c_l = x_centers_per_image - gt_bboxes_per_image_l c_r = gt_bboxes_per_image_r - x_centers_per_image c_t = y_centers_per_image - gt_bboxes_per_image_t c_b = gt_bboxes_per_image_b - y_centers_per_image center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2) is_in_centers = center_deltas.min(dim=-1).values > 0.0 is_in_centers_all = is_in_centers.sum(dim=0) > 0 # in boxes and in centers is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all is_in_boxes_and_center = ( is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor] ) del gt_bboxes_per_image_clip return is_in_boxes_anchor, is_in_boxes_and_center def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask): # Dynamic K # --------------------------------------------------------------- matching_matrix = torch.zeros_like(cost) ious_in_boxes_matrix = pair_wise_ious n_candidate_k = min(10, ious_in_boxes_matrix.size(1)) topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1) dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False ) matching_matrix[gt_idx][pos_idx] = 1.0 del topk_ious, dynamic_ks, pos_idx anchor_matching_gt = matching_matrix.sum(0) if (anchor_matching_gt > 1).sum() > 0: cost_min, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) matching_matrix[:, anchor_matching_gt > 1] *= 0.0 matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 fg_mask_inboxes = matching_matrix.sum(0) > 0.0 num_fg = fg_mask_inboxes.sum().item() fg_mask[fg_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) gt_matched_classes = gt_classes[matched_gt_inds] pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[ fg_mask_inboxes ] return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds ================================================ FILE: yolox/models/yolo_pafpn.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn from .darknet import CSPDarknet from .network_blocks import BaseConv, CSPLayer, DWConv class YOLOPAFPN(nn.Module): """ YOLOv3 model. Darknet 53 is the default backbone of this model. """ def __init__( self, depth=1.0, width=1.0, in_features=("dark3", "dark4", "dark5"), in_channels=[256, 512, 1024], depthwise=False, act="silu", ): super().__init__() self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act) self.in_features = in_features self.in_channels = in_channels Conv = DWConv if depthwise else BaseConv self.upsample = nn.Upsample(scale_factor=2, mode="nearest") self.lateral_conv0 = BaseConv( int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act ) self.C3_p4 = CSPLayer( int(2 * in_channels[1] * width), int(in_channels[1] * width), round(3 * depth), False, depthwise=depthwise, act=act, ) # cat self.reduce_conv1 = BaseConv( int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act ) self.C3_p3 = CSPLayer( int(2 * in_channels[0] * width), int(in_channels[0] * width), round(3 * depth), False, depthwise=depthwise, act=act, ) # bottom-up conv self.bu_conv2 = Conv( int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act ) self.C3_n3 = CSPLayer( int(2 * in_channels[0] * width), int(in_channels[1] * width), round(3 * depth), False, depthwise=depthwise, act=act, ) # bottom-up conv self.bu_conv1 = Conv( int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act ) self.C3_n4 = CSPLayer( int(2 * in_channels[1] * width), int(in_channels[2] * width), round(3 * depth), False, depthwise=depthwise, act=act, ) def forward(self, input): """ Args: inputs: input images. Returns: Tuple[Tensor]: FPN feature. """ # backbone out_features = self.backbone(input) features = [out_features[f] for f in self.in_features] [x2, x1, x0] = features fpn_out0 = self.lateral_conv0(x0) # 1024->512/32 f_out0 = self.upsample(fpn_out0) # 512/16 f_out0 = torch.cat([f_out0, x1], 1) # 512->1024/16 f_out0 = self.C3_p4(f_out0) # 1024->512/16 fpn_out1 = self.reduce_conv1(f_out0) # 512->256/16 f_out1 = self.upsample(fpn_out1) # 256/8 f_out1 = torch.cat([f_out1, x2], 1) # 256->512/8 pan_out2 = self.C3_p3(f_out1) # 512->256/8 p_out1 = self.bu_conv2(pan_out2) # 256->256/16 p_out1 = torch.cat([p_out1, fpn_out1], 1) # 256->512/16 pan_out1 = self.C3_n3(p_out1) # 512->512/16 p_out0 = self.bu_conv1(pan_out1) # 512->512/32 p_out0 = torch.cat([p_out0, fpn_out0], 1) # 512->1024/32 pan_out0 = self.C3_n4(p_out0) # 1024->1024/32 outputs = (pan_out2, pan_out1, pan_out0) return outputs ================================================ FILE: yolox/models/yolox.py ================================================ #!/usr/bin/env python # -*- encoding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch.nn as nn from .yolo_head import YOLOXHead from .yolo_pafpn import YOLOPAFPN class YOLOX(nn.Module): """ YOLOX model module. The module list is defined by create_yolov3_modules function. The network returns loss values from three YOLO layers during training and detection results during test. """ def __init__(self, backbone=None, head=None): super().__init__() if backbone is None: backbone = YOLOPAFPN() if head is None: head = YOLOXHead(80) self.backbone = backbone self.head = head def forward(self, x, targets=None): # fpn output content features of [dark3, dark4, dark5] fpn_outs = self.backbone(x) if self.training: assert targets is not None loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head( fpn_outs, targets, x ) outputs = { "total_loss": loss, "iou_loss": iou_loss, "l1_loss": l1_loss, "conf_loss": conf_loss, "cls_loss": cls_loss, "num_fg": num_fg, } else: outputs = self.head(fpn_outs) return outputs ================================================ FILE: yolox/tracker/basetrack.py ================================================ import numpy as np from collections import OrderedDict class TrackState(object): New = 0 Tracked = 1 Lost = 2 Removed = 3 class BaseTrack(object): _count = 0 track_id = 0 is_activated = False state = TrackState.New history = OrderedDict() features = [] curr_feature = None score = 0 start_frame = 0 frame_id = 0 time_since_update = 0 # multi-camera location = (np.inf, np.inf) @property def end_frame(self): return self.frame_id @staticmethod def next_id(): BaseTrack._count += 1 return BaseTrack._count def activate(self, *args): raise NotImplementedError def predict(self): raise NotImplementedError def update(self, *args, **kwargs): raise NotImplementedError def mark_lost(self): self.state = TrackState.Lost def mark_removed(self): self.state = TrackState.Removed ================================================ FILE: yolox/tracker/diffusion_tracker.py ================================================ import numpy as np from collections import deque import torch import torch.nn.functional as F import torchvision from copy import deepcopy from yolox.tracker import matching from detectron2.structures import Boxes from yolox.utils.box_ops import box_xyxy_to_cxcywh from yolox.utils.boxes import xyxy2cxcywh from torchvision.ops import box_iou,nms from yolox.utils.cluster_nms import cluster_nms class DiffusionTracker(object): def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms_thresh_3d=0.7,nms_thresh_2d=0.75,interval=5): self.frame_id = 0 self.backbone=model.backbone self.feature_projs=model.projs self.diffusion_model=model.head self.feature_extractor=self.diffusion_model.head.box_pooler self.det_thresh = det_thresh self.association_thresh = conf_thresh self.low_det_thresh = 0.1 self.low_association_thresh = 0.2 self.nms_thresh_2d=nms_thresh_2d self.nms_thresh_3d=nms_thresh_3d self.same_thresh=0.9 self.pre_features=None self.data_type=tensor_type self.re_association_features=None self.re_association_interval=interval # [tracklet_id,T,6] (x,y,x,y,score,t) self.tracklet_db=None self.total_time=0 self.dynamic_time=True self.repeat_times=8 self.sampling_steps=1 self.num_boxes=1000 self.track_t=40 self.re_association_t=40 self.mot17=False def update(self,cur_image): self.frame_id += 1 cur_features,mate_info=self.extract_feature(cur_image=cur_image) mate_shape,mate_device,mate_dtype=mate_info self.diffusion_model.device=mate_device self.diffusion_model.dtype=mate_dtype b,_,h,w=mate_shape images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4) if self.frame_id==1: if self.pre_features is None: self.pre_features=cur_features inps=self.prepare_input(self.pre_features,cur_features) diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes, dynamic_time=self.dynamic_time,track_candidate=self.repeat_times) self.total_time+=association_time _,_,detections=self.diffusion_postprocess(diffusion_outputs,conf_scores,conf_thre=self.association_thresh,nms_thre=self.nms_thresh_3d) detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d) self.tracklet_db=np.zeros((len(detections),1,6)) self.tracklet_db[:,-1,:4]=detections[:,:4] self.tracklet_db[:,-1,4]=detections[:,5] self.tracklet_db[:,-1,5]=self.frame_id else: ref_bboxes,ref_track_ids=self.get_targets_from_tracklet_db() inps=self.prepare_input(self.pre_features,cur_features) bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bboxes))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1) # ref_num_proposals=self.proposal_schedule(len(ref_bboxes)) # ref_sampling_steps=self.sampling_steps_schedule(len(ref_bboxes)) track_tracklet_db=np.concatenate([np.zeros((len(self.tracklet_db),1,5)),deepcopy(self.tracklet_db[:,-1,5]).reshape(-1,1,1)],axis=2) diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes, ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.track_t) self.total_time+=association_time diffusion_ref_detections,diffusion_track_detections,detections=self.diffusion_postprocess(diffusion_outputs, conf_scores, conf_thre=self.low_association_thresh, nms_thre=self.nms_thresh_3d) high_track_inds=diffusion_ref_detections[:,4]>self.association_thresh diffusion_ref_detections,diffusion_track_detections=diffusion_ref_detections[high_track_inds],diffusion_track_detections[high_track_inds] detections=self.diffusion_det_filt(detections,conf_thre=self.low_det_thresh,nms_thre=self.nms_thresh_2d) diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections, diffusion_track_detections, conf_thre=self.low_det_thresh, nms_thre=self.nms_thresh_2d) pred_track_ids,pred_bboxes,pred_scores=self.diffusion_matching(ref_bboxes,ref_track_ids, diffusion_ref_detections, diffusion_track_detections) high_det_inds=detections[:,5]>self.det_thresh if pred_bboxes is None: new_detections=detections new_detections_inds=high_det_inds else: dists = matching.iou_distance(pred_bboxes, detections[:,:4]) if self.mot17: dists=matching.fuse_score(dists,detections[:,5]) matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh) new_detections=detections[u_detection] new_detections_inds=high_det_inds[u_detection] if len(matches)>0: pred_bboxes[matches[:,0]]=detections[matches[:,1],:4] if ref_track_ids is not None and pred_track_ids is not None: matching_index=np.argwhere(np.array(ref_track_ids).reshape(-1,1)==pred_track_ids.reshape(1,-1)) track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,:4]=pred_bboxes[matching_index[:,1]] track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,4]=pred_scores[matching_index[:,1]] track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,5]=self.frame_id # self.track_t=400 self.track_t=self.extract_mean_track_t(self.tracklet_db[ref_track_ids[matching_index[:,0]],-1,:4],pred_bboxes[matching_index[:,1]]) # print(self.track_t) self.tracklet_db=np.concatenate([self.tracklet_db,track_tracklet_db],axis=1) # yolox init new tracks if len(new_detections[new_detections_inds])>0: new_detections=new_detections[new_detections_inds] pred_bboxes,pred_scores=new_detections[:,:4],new_detections[:,5] new_tracklet_db=np.zeros((len(new_detections),self.frame_id,6)) new_tracklet_db[:,-1,:4]=pred_bboxes new_tracklet_db[:,-1,4]=pred_scores new_tracklet_db[:,-1,5]=self.frame_id self.tracklet_db=np.concatenate([self.tracklet_db,new_tracklet_db],axis=0) self.pre_features=cur_features if (self.frame_id-1)%self.re_association_interval==0: if self.frame_id!=1: # reassociation inps=self.prepare_input(self.re_association_features,cur_features) # images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4) ref_mask=self.tracklet_db[:,-1-self.re_association_interval,:5].sum(-1)>0 ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1-self.re_association_interval,:4]) ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask] cur_mask=self.tracklet_db[:,-1,:5].sum(-1)>0 cur_bbox=deepcopy(self.tracklet_db[cur_mask,-1,:4]) cur_track_ids=np.arange(len(self.tracklet_db))[cur_mask] mix_mask=np.logical_and(ref_mask,cur_mask) if sum(mix_mask)>0: # self.re_association_t=400 self.re_association_t=self.extract_mean_track_t(self.tracklet_db[mix_mask,-1-self.re_association_interval,:4],self.tracklet_db[mix_mask,-1,:4]) bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bbox))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1) diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes, ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.re_association_t) # self.total_time+=association_time diffusion_ref_detections,diffusion_track_detections,_=self.diffusion_postprocess(diffusion_outputs, conf_scores, conf_thre=self.association_thresh, nms_thre=self.nms_thresh_3d) diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections, diffusion_track_detections, conf_thre=self.det_thresh, nms_thre=self.nms_thresh_2d) pred_track_ids,pred_bboxes,pred_scores=self.diffusion_matching(ref_bbox,ref_track_ids, diffusion_ref_detections, diffusion_track_detections) if pred_bboxes is not None: dists = matching.iou_distance(pred_bboxes,cur_bbox) matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh) if len(matches)>0: re_aasociation_mask=pred_track_ids[matches[:,0]]!=cur_track_ids[matches[:,1]] for pre_track_id,cur_track_id in zip(pred_track_ids[matches[:,0]][re_aasociation_mask], cur_track_ids[matches[:,1]][re_aasociation_mask]): if self.tracklet_db[cur_track_id,-1-self.re_association_interval,-1]==0 and pre_track_id!=cur_track_id and \ max(self.tracklet_db[pre_track_id,-1-self.re_association_interval:,-1])self.tracklet_db[cur_track_id],self.tracklet_db[pre_track_id],self.tracklet_db[cur_track_id]) self.re_association_features=cur_features def get_results(self): results=[] overall_obj_ids=np.arange(len(self.tracklet_db)) for t in range(len(self.tracklet_db[0])): activated_mask=self.tracklet_db[:,t,:5].sum(-1)>0 obj_info=self.tracklet_db[activated_mask,t,:] obj_track_ids=overall_obj_ids[activated_mask] results.append((obj_track_ids,obj_info)) return results def extract_feature(self,cur_image): fpn_outs=self.backbone(cur_image) cur_features=[] for proj,l_feat in zip(self.feature_projs,fpn_outs): cur_features.append(proj(l_feat)) mate_info=(cur_image.shape,cur_image.device,cur_image.dtype) return cur_features,mate_info def extract_mean_track_t(self,pre_box,cur_box): # "xyxy" pre_box=xyxy2cxcywh(pre_box) cur_box=xyxy2cxcywh(cur_box) abs_box=np.abs(pre_box-cur_box) abs_percent=np.sum(abs_box/(pre_box+1e-5),axis=1)/4 track_t=np.mean(abs_percent) return min(max(int(track_t*1000),1),999) def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=0.7,conf_thre=0.6): pre_prediction,cur_prediction=diffusion_outputs.split(len(diffusion_outputs)//2,dim=0) output = [None for _ in range(len(pre_prediction))] for i,(pre_image_pred,cur_image_pred,association_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)): association_score=association_score.flatten() # If none are remaining => process next image if not pre_image_pred.size(0): continue # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000) # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred) detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device) detections[0,:,:4]=pre_image_pred[:,:4] detections[1,:,:4]=cur_image_pred[:,:4] detections[0,:,4]=association_score detections[1,:,4]=association_score detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*association_score) detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*association_score) score_out_index=association_score>conf_thre # strategy=torch.mean # value=strategy(detections[:,:,5],dim=0,keepdim=False) # score_out_index=value>conf_thre detections=detections[:,score_out_index,:] if not detections.size(1): output[i]=detections continue nms_out_index_3d = cluster_nms( detections[0,:,:4], detections[1,:,:4], # value[score_out_index], detections[0,:,4], iou_threshold=nms_thre) detections = detections[:,nms_out_index_3d,:] if output[i] is None: output[i] = detections else: output[i] = torch.cat((output[i], detections)) return output[0][0],output[0][1],torch.cat([output[1][0],output[1][1]],dim=0) if len(output)>=2 else None def diffusion_track_filt(self,ref_detections,track_detections,conf_thre=0.6,nms_thre=0.7): if not ref_detections.size(1): return ref_detections.cpu().numpy(),track_detections.cpu().numpy() scores=ref_detections[:,5] score_out_index=scores>conf_thre ref_detections=ref_detections[score_out_index] track_detections=track_detections[score_out_index] nms_out_index = torchvision.ops.batched_nms( ref_detections[:, :4], ref_detections[:, 5], ref_detections[:, 6], nms_thre, ) return ref_detections[nms_out_index].cpu().numpy(),track_detections[nms_out_index].cpu().numpy() def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thre=0.7): if not diffusion_detections.size(1): return diffusion_detections.cpu().numpy() scores=diffusion_detections[:,5] score_out_index=scores>conf_thre diffusion_detections=diffusion_detections[score_out_index] nms_out_index = torchvision.ops.batched_nms( diffusion_detections[:, :4], diffusion_detections[:, 5], diffusion_detections[:, 6], nms_thre, ) return diffusion_detections[nms_out_index].cpu().numpy() def diffusion_matching(self,ref_bboxes,ref_track_ids,diffusion_pre_track_outputs,diffusion_cur_track_outputs): ref_bboxes=np.array(ref_bboxes) dists=matching.iou_distance(ref_bboxes,diffusion_pre_track_outputs[:,:4]) matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh) if len(matches)>0: ref_track_ids=np.array(ref_track_ids)[matches[:,0]] return ref_track_ids,diffusion_cur_track_outputs[matches[:,1],:4],diffusion_cur_track_outputs[matches[:,1],5] else: return None,None,None def proposal_schedule(self,num_ref_bboxes): # simple strategy return 16*num_ref_bboxes def sampling_steps_schedule(self,num_ref_bboxes): min_sampling_steps=1 max_sampling_steps=4 min_num_bboxes=10 max_num_bboxes=100 ref_sampling_steps=(num_ref_bboxes-min_num_bboxes)*(max_sampling_steps-min_sampling_steps)/(max_num_bboxes-min_num_bboxes)+min_sampling_steps return min(max(int(ref_sampling_steps),min_sampling_steps),max_sampling_steps) def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=0.75,sorted=False,descending=False): box_pred_per_image, scores_per_image=detections[:,:4],detections[:,4]*detections[:,5] score_track_indices=torch.argsort((track_ids+scores_per_image),descending=True) track_ids=track_ids[score_track_indices] scores_per_image=scores_per_image[score_track_indices] box_pred_per_image=box_pred_per_image[score_track_indices] assert len(track_ids)==box_pred_per_image.shape[0] # vote guarantee only one track id in track candidates keep_mask = torch.zeros_like(scores_per_image, dtype=torch.bool) for class_id in torch.unique(track_ids): curr_indices = torch.where(track_ids == class_id)[0] curr_keep_indices = nms(box_pred_per_image[curr_indices],scores_per_image[curr_indices],vote_iou_thres) candidate_iou_indices=box_iou(box_pred_per_image[curr_indices],box_pred_per_image[curr_indices])>vote_iou_thres counter=[] for cluster_indice in candidate_iou_indices[curr_keep_indices]: cluster_scores=scores_per_image[curr_indices][cluster_indice] counter.append(len(cluster_scores)+torch.mean(cluster_scores)) max_indice=torch.argmax(torch.tensor(counter).type(self.data_type)) keep_mask[curr_indices[curr_keep_indices][max_indice]] = True keep_indices = torch.where(keep_mask)[0] track_ids=track_ids[keep_indices] box_pred_per_image=box_pred_per_image[keep_indices] scores_per_image=scores_per_image[keep_indices] if sorted and not descending: descending_indices=torch.argsort(track_ids) track_ids=track_ids[descending_indices] box_pred_per_image=box_pred_per_image[descending_indices] scores_per_image=scores_per_image[descending_indices] return track_ids.cpu().numpy(),box_pred_per_image.cpu().numpy(),scores_per_image.cpu().numpy() def prepare_input(self,pre_features,cur_features): inps_pre_features=[] inps_cur_Features=[] for l_pre_feat,l_cur_feat in zip(pre_features,cur_features): inps_pre_features.append(torch.cat([l_pre_feat.clone(),l_cur_feat.clone()],dim=0)) inps_cur_Features.append(torch.cat([l_cur_feat.clone(),l_cur_feat.clone()],dim=0)) return (inps_pre_features,inps_cur_Features) def get_targets_from_tracklet_db(self): ref_mask=self.tracklet_db[:,-1,:5].sum(-1)>0 ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1,:4]) ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask] return ref_bbox,ref_track_ids def joint_stracks(tlista, tlistb): exists = {} res = [] for t in tlista: exists[t.track_id] = 1 res.append(t) for t in tlistb: tid = t.track_id if not exists.get(tid, 0): exists[tid] = 1 res.append(t) return res def sub_stracks(tlista, tlistb): stracks = {} for t in tlista: stracks[t.track_id] = t for t in tlistb: tid = t.track_id if stracks.get(tid, 0): del stracks[tid] return list(stracks.values()) ================================================ FILE: yolox/tracker/diffusion_tracker_kl.py ================================================ import numpy as np from collections import deque import time import torch import torch.nn.functional as F import torchvision from copy import deepcopy from yolox.tracker import matching from detectron2.structures import Boxes from yolox.utils.box_ops import box_xyxy_to_cxcywh from yolox.utils.boxes import xyxy2cxcywh from torchvision.ops import box_iou,nms from yolox.utils.cluster_nms import cluster_nms from .kalman_filter import KalmanFilter from yolox.tracker import matching from .basetrack import BaseTrack, TrackState class STrack(BaseTrack): shared_kalman = KalmanFilter() def __init__(self, tlwh, score): # wait activate self._tlwh = np.asarray(tlwh, dtype=np.float) self.kalman_filter = None self.mean, self.covariance = None, None self.is_activated = False self.score = score self.tracklet_len = 0 def predict(self): mean_state = self.mean.copy() if self.state != TrackState.Tracked: mean_state[7] = 0 self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) @staticmethod def multi_predict(stracks): if len(stracks) > 0: multi_mean = np.asarray([st.mean.copy() for st in stracks]) multi_covariance = np.asarray([st.covariance for st in stracks]) for i, st in enumerate(stracks): if st.state != TrackState.Tracked: multi_mean[i][7] = 0 multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): stracks[i].mean = mean stracks[i].covariance = cov def activate(self, kalman_filter, frame_id): """Start a new tracklet""" self.kalman_filter = kalman_filter self.track_id = self.next_id() self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh)) self.tracklet_len = 0 self.state = TrackState.Tracked if frame_id == 1: self.is_activated = True # self.is_activated = True self.frame_id = frame_id self.start_frame = frame_id def re_activate(self, new_track, frame_id, new_id=False): self.mean, self.covariance = self.kalman_filter.update( self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) ) self._tlwh=new_track.tlwh self.tracklet_len = 0 self.state = TrackState.Tracked self.is_activated = True self.frame_id = frame_id if new_id: self.track_id = self.next_id() self.score = new_track.score def update(self, new_track, frame_id): """ Update a matched track :type new_track: STrack :type frame_id: int :type update_feature: bool :return: """ self.frame_id = frame_id self.tracklet_len += 1 new_tlwh = new_track.tlwh self._tlwh=new_tlwh self.mean, self.covariance = self.kalman_filter.update( self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh)) self.state = TrackState.Tracked self.is_activated = True self.score = new_track.score @property # @jit(nopython=True) def tlwh(self): """Get current position in bounding box format `(top left x, top left y, width, height)`. """ if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() ret[2] *= ret[3] ret[:2] -= ret[2:] / 2 return ret @property # @jit(nopython=True) def tlbr(self): """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., `(top left, bottom right)`. """ ret = self.tlwh.copy() ret[2:] += ret[:2] return ret @staticmethod # @jit(nopython=True) def tlwh_to_xyah(tlwh): """Convert bounding box to format `(center x, center y, aspect ratio, height)`, where the aspect ratio is `width / height`. """ ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 ret[2] /= ret[3] return ret def to_xyah(self): return self.tlwh_to_xyah(self.tlwh) @staticmethod # @jit(nopython=True) def tlbr_to_tlwh(tlbr): ret = np.asarray(tlbr).copy() ret[2:] -= ret[:2] return ret @staticmethod # @jit(nopython=True) def tlwh_to_tlbr(tlwh): ret = np.asarray(tlwh).copy() ret[2:] += ret[:2] return ret def __repr__(self): return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame) class DiffusionTracker(object): def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms_thresh_3d=0.7,nms_thresh_2d=0.75,interval=5,detections=None): self.frame_id = 0 # BaseTrack._count=-1 self.backbone=model.backbone self.feature_projs=model.projs self.diffusion_model=model.head self.feature_extractor=self.diffusion_model.head.box_pooler self.det_thresh = det_thresh self.association_thresh = conf_thresh # self.low_det_thresh = 0.1 # self.low_association_thresh = 0.2 self.nms_thresh_2d=nms_thresh_2d self.nms_thresh_3d=nms_thresh_3d self.same_thresh=0.9 self.pre_features=None self.data_type=tensor_type self.detections=detections self.tracked_stracks = [] # type: list[STrack] self.lost_stracks = [] # type: list[STrack] self.removed_stracks = [] # type: list[STrack] self.max_time_lost = 30 self.kalman_filter = KalmanFilter() self.repeat_times=0 self.dynamic_time=True self.sampling_steps=1 self.num_boxes=500 self.track_t=400 self.mot17=False def update(self,cur_image): self.frame_id += 1 activated_starcks = [] refind_stracks = [] lost_stracks = [] removed_stracks = [] cur_features,mate_info=self.extract_feature(cur_image=cur_image) mate_shape,mate_device,mate_dtype=mate_info self.diffusion_model.device=mate_device self.diffusion_model.dtype=mate_dtype b,_,h,w=mate_shape images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4) if self.frame_id==1: if self.pre_features is None: self.pre_features=cur_features inps=self.prepare_input(self.pre_features,cur_features) diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes, dynamic_time=self.dynamic_time,track_candidate=self.repeat_times) _,_,detections=self.diffusion_postprocess(diffusion_outputs,conf_scores,conf_thre=self.association_thresh,nms_thre=self.nms_thresh_3d) detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d) # detections=np.array(self.detections[self.frame_id]) # detections=detections[detections[:,5]>self.det_thresh] for det in detections: track=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5]) track.activate(self.kalman_filter, self.frame_id) self.tracked_stracks.append(track) output_stracks = [track for track in self.tracked_stracks if track.is_activated] return output_stracks,association_time else: ref_bboxes=[STrack.tlwh_to_tlbr(track._tlwh) for track in self.tracked_stracks] inps=self.prepare_input(self.pre_features,cur_features) if len(ref_bboxes)>0: bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bboxes))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1) else: bboxes=None # ref_num_proposals=self.proposal_schedule(len(ref_bboxes)) # ref_sampling_steps=self.sampling_steps_schedule(len(ref_bboxes)) diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes, ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.track_t) diffusion_ref_detections,diffusion_track_detections,detections=self.diffusion_postprocess(diffusion_outputs, conf_scores, conf_thre=self.association_thresh, nms_thre=self.nms_thresh_3d) detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d) # detections=np.array(self.detections[self.frame_id]) # if len(detections)>0: # detections=detections[detections[:,5]>self.det_thresh] diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections, diffusion_track_detections, conf_thre=self.det_thresh, nms_thre=self.nms_thresh_2d) start_time=time.time() STrack.multi_predict(self.tracked_stracks) dists = matching.iou_distance(ref_bboxes, diffusion_ref_detections[:,:4]) matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh) if len(matches)>0: # fix position with detection result dists_fix=matching.iou_distance(diffusion_track_detections[matches[:,1],:4],detections[:,:4]) matches_fix, u_track_fix, u_detection_fix = matching.linear_assignment(dists_fix, thresh=self.same_thresh) if len(matches_fix)>0: diffusion_track_detections[matches[:,1]][matches_fix[:,0],:4]=detections[matches_fix[:,1],:4] # filt detection with tracked result detections=detections[u_detection_fix] ref_box_t=[] track_box_t=[] for itracked, idet in matches: track = self.tracked_stracks[itracked] ref_box_t.append(STrack.tlwh_to_tlbr(track._tlwh)) det = diffusion_track_detections[idet] track_box_t.append(det[:4]) new_strack=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5]) if track.state == TrackState.Tracked: track.update(new_strack, self.frame_id) activated_starcks.append(track) else: track.re_activate(new_strack, self.frame_id, new_id=False) refind_stracks.append(track) if len(ref_box_t)>0: self.track_t=self.extract_mean_track_t(np.array(ref_box_t),np.array(track_box_t)) for it in u_track: track = self.tracked_stracks[it] if not track.state == TrackState.Lost: track.mark_lost() lost_stracks.append(track) STrack.multi_predict(self.lost_stracks) dists_lost = matching.iou_distance([track.tlbr for track in self.lost_stracks], detections[:4]) matches_lost, u_track_lost, u_detection_lost = matching.linear_assignment(dists_lost, thresh=self.same_thresh) for itracked, idet in matches_lost: track = self.lost_stracks[itracked] det = detections[idet] new_strack=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5]) if track.state == TrackState.Tracked: track.update(new_strack, self.frame_id) activated_starcks.append(track) else: track.re_activate(new_strack, self.frame_id, new_id=False) refind_stracks.append(track) for inew in u_detection_lost: # for inew in range(len(detections)): det = detections[inew] track=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5]) track.activate(self.kalman_filter, self.frame_id) activated_starcks.append(track) for track in self.lost_stracks: if self.frame_id - track.end_frame > self.max_time_lost: track.mark_removed() removed_stracks.append(track) self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks) self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks) self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks) self.lost_stracks.extend(lost_stracks) self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks) self.removed_stracks.extend(removed_stracks) self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) # get scores of lost tracks self.pre_features=cur_features output_stracks = [track for track in self.tracked_stracks] return output_stracks,association_time+time.time()-start_time def extract_feature(self,cur_image): fpn_outs=self.backbone(cur_image) cur_features=[] for proj,l_feat in zip(self.feature_projs,fpn_outs): cur_features.append(proj(l_feat)) mate_info=(cur_image.shape,cur_image.device,cur_image.dtype) return cur_features,mate_info def extract_mean_track_t(self,pre_box,cur_box): # "xyxy" pre_box=xyxy2cxcywh(pre_box) cur_box=xyxy2cxcywh(cur_box) abs_box=np.abs(pre_box-cur_box) abs_percent=np.sum(abs_box/(pre_box+1e-5),axis=1)/4 track_t=np.mean(abs_percent) # min(max(int(track_t*1000),1),999) # min(max(int((np.exp(track_t)-1)/(np.exp(0)-1)*1000),1),999) # min(max(int(np.log(track_t+1)/np.log(2)*1000),1),999) return min(max(int(track_t*1000),1),999) def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=0.7,conf_thre=0.6): pre_prediction,cur_prediction=diffusion_outputs.split(len(diffusion_outputs)//2,dim=0) output = [None for _ in range(len(pre_prediction))] for i,(pre_image_pred,cur_image_pred,association_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)): association_score=association_score.flatten() # If none are remaining => process next image if not pre_image_pred.size(0): continue # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000) # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred) detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device) detections[0,:,:4]=pre_image_pred[:,:4] detections[1,:,:4]=cur_image_pred[:,:4] detections[0,:,4]=association_score detections[1,:,4]=association_score detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*association_score) detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*association_score) score_out_index=association_score>conf_thre # strategy=torch.mean # value=strategy(detections[:,:,5],dim=0,keepdim=False) # score_out_index=value>conf_thre detections=detections[:,score_out_index,:] if not detections.size(1): output[i]=detections continue nms_out_index_3d = cluster_nms( detections[0,:,:4], detections[1,:,:4], # value[score_out_index], detections[0,:,4], iou_threshold=nms_thre) detections = detections[:,nms_out_index_3d,:] if output[i] is None: output[i] = detections else: output[i] = torch.cat((output[i], detections)) return output[0][0],output[0][1],torch.cat([output[1][0],output[1][1]],dim=0) if len(output)>=2 else None def diffusion_track_filt(self,ref_detections,track_detections,conf_thre=0.6,nms_thre=0.7): if not ref_detections.size(1): return ref_detections.cpu().numpy(),track_detections.cpu().numpy() scores=ref_detections[:,5] score_out_index=scores>conf_thre ref_detections=ref_detections[score_out_index] track_detections=track_detections[score_out_index] nms_out_index = torchvision.ops.batched_nms( ref_detections[:, :4], ref_detections[:, 5], ref_detections[:, 6], nms_thre, ) return ref_detections[nms_out_index].cpu().numpy(),track_detections[nms_out_index].cpu().numpy() def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thre=0.7): if not diffusion_detections.size(1): return diffusion_detections.cpu().numpy() scores=diffusion_detections[:,5] score_out_index=scores>conf_thre diffusion_detections=diffusion_detections[score_out_index] nms_out_index = torchvision.ops.batched_nms( diffusion_detections[:, :4], diffusion_detections[:, 5], diffusion_detections[:, 6], nms_thre, ) return diffusion_detections[nms_out_index].cpu().numpy() def proposal_schedule(self,num_ref_bboxes): # simple strategy return 16*num_ref_bboxes def sampling_steps_schedule(self,num_ref_bboxes): min_sampling_steps=1 max_sampling_steps=4 min_num_bboxes=10 max_num_bboxes=100 ref_sampling_steps=(num_ref_bboxes-min_num_bboxes)*(max_sampling_steps-min_sampling_steps)/(max_num_bboxes-min_num_bboxes)+min_sampling_steps return min(max(int(ref_sampling_steps),min_sampling_steps),max_sampling_steps) def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=0.75,sorted=False,descending=False): box_pred_per_image, scores_per_image=detections[:,:4],detections[:,4]*detections[:,5] score_track_indices=torch.argsort((track_ids+scores_per_image),descending=True) track_ids=track_ids[score_track_indices] scores_per_image=scores_per_image[score_track_indices] box_pred_per_image=box_pred_per_image[score_track_indices] assert len(track_ids)==box_pred_per_image.shape[0] # vote guarantee only one track id in track candidates keep_mask = torch.zeros_like(scores_per_image, dtype=torch.bool) for class_id in torch.unique(track_ids): curr_indices = torch.where(track_ids == class_id)[0] curr_keep_indices = nms(box_pred_per_image[curr_indices],scores_per_image[curr_indices],vote_iou_thres) candidate_iou_indices=box_iou(box_pred_per_image[curr_indices],box_pred_per_image[curr_indices])>vote_iou_thres counter=[] for cluster_indice in candidate_iou_indices[curr_keep_indices]: cluster_scores=scores_per_image[curr_indices][cluster_indice] counter.append(len(cluster_scores)+torch.mean(cluster_scores)) max_indice=torch.argmax(torch.tensor(counter).type(self.data_type)) keep_mask[curr_indices[curr_keep_indices][max_indice]] = True keep_indices = torch.where(keep_mask)[0] track_ids=track_ids[keep_indices] box_pred_per_image=box_pred_per_image[keep_indices] scores_per_image=scores_per_image[keep_indices] if sorted and not descending: descending_indices=torch.argsort(track_ids) track_ids=track_ids[descending_indices] box_pred_per_image=box_pred_per_image[descending_indices] scores_per_image=scores_per_image[descending_indices] return track_ids.cpu().numpy(),box_pred_per_image.cpu().numpy(),scores_per_image.cpu().numpy() def prepare_input(self,pre_features,cur_features): inps_pre_features=[] inps_cur_Features=[] for l_pre_feat,l_cur_feat in zip(pre_features,cur_features): inps_pre_features.append(torch.cat([l_pre_feat.clone(),l_cur_feat.clone()],dim=0)) inps_cur_Features.append(torch.cat([l_cur_feat.clone(),l_cur_feat.clone()],dim=0)) return (inps_pre_features,inps_cur_Features) # def get_targets_from_tracklet_db(self): # ref_mask=self.tracklet_db[:,-1,:5].sum(-1)>0 # ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1,:4]) # ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask] # return ref_bbox,ref_track_ids def joint_stracks(tlista, tlistb): exists = {} res = [] for t in tlista: exists[t.track_id] = 1 res.append(t) for t in tlistb: tid = t.track_id if not exists.get(tid, 0): exists[tid] = 1 res.append(t) return res def sub_stracks(tlista, tlistb): stracks = {} for t in tlista: stracks[t.track_id] = t for t in tlistb: tid = t.track_id if stracks.get(tid, 0): del stracks[tid] return list(stracks.values()) from sklearn.metrics.pairwise import cosine_similarity def remove_duplicate_stracks(stracksa, stracksb): pdist = matching.iou_distance(stracksa, stracksb) # if len(stracksa)>0 and len(stracksb)>0: # # fix a derection bug # pcosdist=cosine_similarity( # [track.mean[4:6] for track in stracksa], # [track.mean[4:6] for track in stracksb]) # pdist=(pdist+pcosdist)/2 pairs = np.where(pdist < 0.15) dupa, dupb = list(), list() for p, q in zip(*pairs): timep = stracksa[p].frame_id - stracksa[p].start_frame timeq = stracksb[q].frame_id - stracksb[q].start_frame if stracksa[p].mean is not None and stracksb[q].mean is not None: x,y=stracksa[p].mean[4:6],stracksa[p].mean[4:6] cosine_dist=1-np.dot(x,y)/(np.linalg.norm(x)*np.linalg.norm(y)+1e-06) if cosine_dist>0.15: continue if timep > timeq: dupb.append(q) else: dupa.append(p) resa = [t for i, t in enumerate(stracksa) if not i in dupa] resb = [t for i, t in enumerate(stracksb) if not i in dupb] return resa, resb ================================================ FILE: yolox/tracker/kalman_filter.py ================================================ # vim: expandtab:ts=4:sw=4 import numpy as np import scipy.linalg """ Table for the 0.95 quantile of the chi-square distribution with N degrees of freedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. """ chi2inv95 = { 1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} class KalmanFilter(object): """ A simple Kalman filter for tracking bounding boxes in image space. The 8-dimensional state space x, y, a, h, vx, vy, va, vh contains the bounding box center position (x, y), aspect ratio a, height h, and their respective velocities. Object motion follows a constant velocity model. The bounding box location (x, y, a, h) is taken as direct observation of the state space (linear observation model). """ def __init__(self): ndim, dt = 4, 1. # Create Kalman filter model matrices. self._motion_mat = np.eye(2 * ndim, 2 * ndim) for i in range(ndim): self._motion_mat[i, ndim + i] = dt self._update_mat = np.eye(ndim, 2 * ndim) # Motion and observation uncertainty are chosen relative to the current # state estimate. These weights control the amount of uncertainty in # the model. This is a bit hacky. self._std_weight_position = 1. / 20 self._std_weight_velocity = 1. / 160 def initiate(self, measurement): """Create track from unassociated measurement. Parameters ---------- measurement : ndarray Bounding box coordinates (x, y, a, h) with center position (x, y), aspect ratio a, and height h. Returns ------- (ndarray, ndarray) Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track. Unobserved velocities are initialized to 0 mean. """ mean_pos = measurement mean_vel = np.zeros_like(mean_pos) mean = np.r_[mean_pos, mean_vel] std = [ 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2, 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3], 10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]] covariance = np.diag(np.square(std)) return mean, covariance def predict(self, mean, covariance): """Run Kalman filter prediction step. Parameters ---------- mean : ndarray The 8 dimensional mean vector of the object state at the previous time step. covariance : ndarray The 8x8 dimensional covariance matrix of the object state at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. """ std_pos = [ self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2, self._std_weight_position * mean[3]] std_vel = [ self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5, self._std_weight_velocity * mean[3]] motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) #mean = np.dot(self._motion_mat, mean) mean = np.dot(mean, self._motion_mat.T) covariance = np.linalg.multi_dot(( self._motion_mat, covariance, self._motion_mat.T)) + motion_cov return mean, covariance def project(self, mean, covariance): """Project state distribution to measurement space. Parameters ---------- mean : ndarray The state's mean vector (8 dimensional array). covariance : ndarray The state's covariance matrix (8x8 dimensional). Returns ------- (ndarray, ndarray) Returns the projected mean and covariance matrix of the given state estimate. """ std = [ self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, self._std_weight_position * mean[3]] innovation_cov = np.diag(np.square(std)) mean = np.dot(self._update_mat, mean) covariance = np.linalg.multi_dot(( self._update_mat, covariance, self._update_mat.T)) return mean, covariance + innovation_cov def multi_predict(self, mean, covariance): """Run Kalman filter prediction step (Vectorized version). Parameters ---------- mean : ndarray The Nx8 dimensional mean matrix of the object states at the previous time step. covariance : ndarray The Nx8x8 dimensional covariance matrics of the object states at the previous time step. Returns ------- (ndarray, ndarray) Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are initialized to 0 mean. """ std_pos = [ self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3], 1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]] std_vel = [ self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3], 1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]] sqr = np.square(np.r_[std_pos, std_vel]).T motion_cov = [] for i in range(len(mean)): motion_cov.append(np.diag(sqr[i])) motion_cov = np.asarray(motion_cov) mean = np.dot(mean, self._motion_mat.T) left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) covariance = np.dot(left, self._motion_mat.T) + motion_cov return mean, covariance def update(self, mean, covariance, measurement): """Run Kalman filter correction step. Parameters ---------- mean : ndarray The predicted state's mean vector (8 dimensional). covariance : ndarray The state's covariance matrix (8x8 dimensional). measurement : ndarray The 4 dimensional measurement vector (x, y, a, h), where (x, y) is the center position, a the aspect ratio, and h the height of the bounding box. Returns ------- (ndarray, ndarray) Returns the measurement-corrected state distribution. """ projected_mean, projected_cov = self.project(mean, covariance) chol_factor, lower = scipy.linalg.cho_factor( projected_cov, lower=True, check_finite=False) kalman_gain = scipy.linalg.cho_solve( (chol_factor, lower), np.dot(covariance, self._update_mat.T).T, check_finite=False).T innovation = measurement - projected_mean new_mean = mean + np.dot(innovation, kalman_gain.T) new_covariance = covariance - np.linalg.multi_dot(( kalman_gain, projected_cov, kalman_gain.T)) return new_mean, new_covariance def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): """Compute gating distance between state distribution and measurements. A suitable distance threshold can be obtained from `chi2inv95`. If `only_position` is False, the chi-square distribution has 4 degrees of freedom, otherwise 2. Parameters ---------- mean : ndarray Mean vector over the state distribution (8 dimensional). covariance : ndarray Covariance of the state distribution (8x8 dimensional). measurements : ndarray An Nx4 dimensional matrix of N measurements, each in format (x, y, a, h) where (x, y) is the bounding box center position, a the aspect ratio, and h the height. only_position : Optional[bool] If True, distance computation is done with respect to the bounding box center position only. Returns ------- ndarray Returns an array of length N, where the i-th element contains the squared Mahalanobis distance between (mean, covariance) and `measurements[i]`. """ mean, covariance = self.project(mean, covariance) if only_position: mean, covariance = mean[:2], covariance[:2, :2] measurements = measurements[:, :2] d = measurements - mean if metric == 'gaussian': return np.sum(d * d, axis=1) elif metric == 'maha': cholesky_factor = np.linalg.cholesky(covariance) z = scipy.linalg.solve_triangular( cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) squared_maha = np.sum(z * z, axis=0) return squared_maha else: raise ValueError('invalid distance metric') ================================================ FILE: yolox/tracker/matching.py ================================================ import cv2 import numpy as np import scipy import lap from scipy.spatial.distance import cdist from cython_bbox import bbox_overlaps as bbox_ious import time def merge_matches(m1, m2, shape): O,P,Q = shape m1 = np.asarray(m1) m2 = np.asarray(m2) M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) mask = M1*M2 match = mask.nonzero() match = list(zip(match[0], match[1])) unmatched_O = tuple(set(range(O)) - set([i for i, j in match])) unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match])) return match, unmatched_O, unmatched_Q def _indices_to_matches(cost_matrix, indices, thresh): matched_cost = cost_matrix[tuple(zip(*indices))] matched_mask = (matched_cost <= thresh) matches = indices[matched_mask] unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) return matches, unmatched_a, unmatched_b def linear_assignment(cost_matrix, thresh): if cost_matrix.size == 0: return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) matches, unmatched_a, unmatched_b = [], [], [] cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) for ix, mx in enumerate(x): if mx >= 0: matches.append([ix, mx]) unmatched_a = np.where(x < 0)[0] unmatched_b = np.where(y < 0)[0] matches = np.asarray(matches) return matches, unmatched_a, unmatched_b def ious(atlbrs, btlbrs): """ Compute cost based on IoU :type atlbrs: list[tlbr] | np.ndarray :type atlbrs: list[tlbr] | np.ndarray :rtype ious np.ndarray """ ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float) if ious.size == 0: return ious ious = bbox_ious( np.ascontiguousarray(atlbrs, dtype=np.float), np.ascontiguousarray(btlbrs, dtype=np.float) ) return ious def iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs = [track.tlbr for track in atracks] btlbrs = [track.tlbr for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def v_iou_distance(atracks, btracks): """ Compute cost based on IoU :type atracks: list[STrack] :type btracks: list[STrack] :rtype cost_matrix np.ndarray """ if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): atlbrs = atracks btlbrs = btracks else: atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] _ious = ious(atlbrs, btlbrs) cost_matrix = 1 - _ious return cost_matrix def embedding_distance(tracks, detections, metric='cosine'): """ :param tracks: list[STrack] :param detections: list[BaseTrack] :param metric: :return: cost_matrix np.ndarray """ cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float) if cost_matrix.size == 0: return cost_matrix det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float) #for i, track in enumerate(tracks): #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric)) track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float) cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Nomalized features return cost_matrix # def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): # if cost_matrix.size == 0: # return cost_matrix # gating_dim = 2 if only_position else 4 # gating_threshold = kalman_filter.chi2inv95[gating_dim] # measurements = np.asarray([det.to_xyah() for det in detections]) # for row, track in enumerate(tracks): # gating_distance = kf.gating_distance( # track.mean, track.covariance, measurements, only_position) # cost_matrix[row, gating_distance > gating_threshold] = np.inf # return cost_matrix # def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): # if cost_matrix.size == 0: # return cost_matrix # gating_dim = 2 if only_position else 4 # gating_threshold = kalman_filter.chi2inv95[gating_dim] # measurements = np.asarray([det.to_xyah() for det in detections]) # for row, track in enumerate(tracks): # gating_distance = kf.gating_distance( # track.mean, track.covariance, measurements, only_position, metric='maha') # cost_matrix[row, gating_distance > gating_threshold] = np.inf # cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance # return cost_matrix def fuse_iou(cost_matrix, tracks, detections): if cost_matrix.size == 0: return cost_matrix reid_sim = 1 - cost_matrix iou_dist = iou_distance(tracks, detections) iou_sim = 1 - iou_dist fuse_sim = reid_sim * (1 + iou_sim) / 2 det_scores = np.array([det.score for det in detections]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) #fuse_sim = fuse_sim * (1 + det_scores) / 2 fuse_cost = 1 - fuse_sim return fuse_cost def fuse_score(cost_matrix, scores): if cost_matrix.size == 0: return cost_matrix iou_sim = 1 - cost_matrix det_scores = np.array([score for score in scores]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) fuse_sim = iou_sim * det_scores fuse_cost = 1 - fuse_sim return fuse_cost ================================================ FILE: yolox/tracking_utils/evaluation.py ================================================ import os import numpy as np import copy import motmetrics as mm mm.lap.default_solver = 'lap' from yolox.tracking_utils.io import read_results, unzip_objs class Evaluator(object): def __init__(self, data_root, seq_name, data_type): self.data_root = data_root self.seq_name = seq_name self.data_type = data_type self.load_annotations() self.reset_accumulator() def load_annotations(self): assert self.data_type == 'mot' gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt') self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True) self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True) def reset_accumulator(self): self.acc = mm.MOTAccumulator(auto_id=True) def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False): # results trk_tlwhs = np.copy(trk_tlwhs) trk_ids = np.copy(trk_ids) # gts gt_objs = self.gt_frame_dict.get(frame_id, []) gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2] # ignore boxes ignore_objs = self.gt_ignore_frame_dict.get(frame_id, []) ignore_tlwhs = unzip_objs(ignore_objs)[0] # remove ignored results keep = np.ones(len(trk_tlwhs), dtype=bool) iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5) if len(iou_distance) > 0: match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) match_ious = iou_distance[match_is, match_js] match_js = np.asarray(match_js, dtype=int) match_js = match_js[np.logical_not(np.isnan(match_ious))] keep[match_js] = False trk_tlwhs = trk_tlwhs[keep] trk_ids = trk_ids[keep] #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance) #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js]) #match_ious = iou_distance[match_is, match_js] #match_js = np.asarray(match_js, dtype=int) #match_js = match_js[np.logical_not(np.isnan(match_ious))] #keep[match_js] = False #trk_tlwhs = trk_tlwhs[keep] #trk_ids = trk_ids[keep] # get distance matrix iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5) # acc self.acc.update(gt_ids, trk_ids, iou_distance) if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'): events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics else: events = None return events def eval_file(self, filename): self.reset_accumulator() result_frame_dict = read_results(filename, self.data_type, is_gt=False) #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys()))) frames = sorted(list(set(result_frame_dict.keys()))) for frame_id in frames: trk_objs = result_frame_dict.get(frame_id, []) trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2] self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False) return self.acc @staticmethod def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')): names = copy.deepcopy(names) if metrics is None: metrics = mm.metrics.motchallenge_metrics metrics = copy.deepcopy(metrics) mh = mm.metrics.create() summary = mh.compute_many( accs, metrics=metrics, names=names, generate_overall=True ) return summary @staticmethod def save_summary(summary, filename): import pandas as pd writer = pd.ExcelWriter(filename) summary.to_excel(writer) writer.save() ================================================ FILE: yolox/tracking_utils/io.py ================================================ import os from typing import Dict import numpy as np def write_results(filename, results_dict: Dict, data_type: str): if not filename: return path = os.path.dirname(filename) if not os.path.exists(path): os.makedirs(path) if data_type in ('mot', 'mcmot', 'lab'): save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n' elif data_type == 'kitti': save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\n' else: raise ValueError(data_type) with open(filename, 'w') as f: for frame_id, frame_data in results_dict.items(): if data_type == 'kitti': frame_id -= 1 for tlwh, track_id in frame_data: if track_id < 0: continue x1, y1, w, h = tlwh x2, y2 = x1 + w, y1 + h line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0) f.write(line) def read_results(filename, data_type: str, is_gt=False, is_ignore=False): if data_type in ('mot', 'lab'): read_fun = read_mot_results else: raise ValueError('Unknown data type: {}'.format(data_type)) return read_fun(filename, is_gt, is_ignore) """ labels={'ped', ... % 1 'person_on_vhcl', ... % 2 'car', ... % 3 'bicycle', ... % 4 'mbike', ... % 5 'non_mot_vhcl', ... % 6 'static_person', ... % 7 'distractor', ... % 8 'occluder', ... % 9 'occluder_on_grnd', ... %10 'occluder_full', ... % 11 'reflection', ... % 12 'crowd' ... % 13 }; """ def read_mot_results(filename, is_gt, is_ignore): valid_labels = {1} ignore_labels = {2, 7, 8, 12} results_dict = dict() if os.path.isfile(filename): with open(filename, 'r') as f: for line in f.readlines(): linelist = line.split(',') if len(linelist) < 7: continue fid = int(linelist[0]) if fid < 1: continue results_dict.setdefault(fid, list()) box_size = float(linelist[4]) * float(linelist[5]) if is_gt: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) mark = int(float(linelist[6])) if mark == 0 or label not in valid_labels: continue score = 1 elif is_ignore: if 'MOT16-' in filename or 'MOT17-' in filename: label = int(float(linelist[7])) vis_ratio = float(linelist[8]) if label not in ignore_labels and vis_ratio >= 0: continue else: continue score = 1 else: score = float(linelist[6]) #if box_size > 7000: #if box_size <= 7000 or box_size >= 15000: #if box_size < 15000: #continue tlwh = tuple(map(float, linelist[2:6])) target_id = int(linelist[1]) results_dict[fid].append((tlwh, target_id, score)) return results_dict def unzip_objs(objs): if len(objs) > 0: tlwhs, ids, scores = zip(*objs) else: tlwhs, ids, scores = [], [], [] tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4) return tlwhs, ids, scores ================================================ FILE: yolox/tracking_utils/timer.py ================================================ import time class Timer(object): """A simple timer.""" def __init__(self): self.total_time = 0. self.calls = 0 self.start_time = 0. self.diff = 0. self.average_time = 0. self.duration = 0. def tic(self): # using time.time instead of time.clock because time time.clock # does not normalize for multithreading self.start_time = time.time() def toc(self, average=True): self.diff = time.time() - self.start_time self.total_time += self.diff self.calls += 1 self.average_time = self.total_time / self.calls if average: self.duration = self.average_time else: self.duration = self.diff return self.duration def clear(self): self.total_time = 0. self.calls = 0 self.start_time = 0. self.diff = 0. self.average_time = 0. self.duration = 0. ================================================ FILE: yolox/utils/__init__.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from .allreduce_norm import * from .boxes import * from .checkpoint import load_ckpt, save_checkpoint from .demo_utils import * from .dist import * from .ema import ModelEMA from .logger import setup_logger from .lr_scheduler import LRScheduler from .metric import * from .model_utils import * from .setup_env import * from .visualize import * ================================================ FILE: yolox/utils/allreduce_norm.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch from torch import distributed as dist from torch import nn import pickle from collections import OrderedDict from .dist import _get_global_gloo_group, get_world_size ASYNC_NORM = ( nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d, ) __all__ = [ "get_async_norm_states", "pyobj2tensor", "tensor2pyobj", "all_reduce", "all_reduce_norm", ] def get_async_norm_states(module): async_norm_states = OrderedDict() for name, child in module.named_modules(): if isinstance(child, ASYNC_NORM): for k, v in child.state_dict().items(): async_norm_states[".".join([name, k])] = v return async_norm_states def pyobj2tensor(pyobj, device="cuda"): """serialize picklable python object to tensor""" storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) return torch.ByteTensor(storage).to(device=device) def tensor2pyobj(tensor): """deserialize tensor to picklable python object""" return pickle.loads(tensor.cpu().numpy().tobytes()) def _get_reduce_op(op_name): return { "sum": dist.ReduceOp.SUM, "mean": dist.ReduceOp.SUM, }[op_name.lower()] def all_reduce(py_dict, op="sum", group=None): """ Apply all reduce function for python dict object. NOTE: make sure that every py_dict has the same keys and values are in the same shape. Args: py_dict (dict): dict to apply all reduce op. op (str): operator, could be "sum" or "mean". """ world_size = get_world_size() if world_size == 1: return py_dict if group is None: group = _get_global_gloo_group() if dist.get_world_size(group) == 1: return py_dict # all reduce logic across different devices. py_key = list(py_dict.keys()) py_key_tensor = pyobj2tensor(py_key) dist.broadcast(py_key_tensor, src=0) py_key = tensor2pyobj(py_key_tensor) tensor_shapes = [py_dict[k].shape for k in py_key] tensor_numels = [py_dict[k].numel() for k in py_key] flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) dist.all_reduce(flatten_tensor, op=_get_reduce_op(op)) if op == "mean": flatten_tensor /= world_size split_tensors = [ x.reshape(shape) for x, shape in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes) ] return OrderedDict({k: v for k, v in zip(py_key, split_tensors)}) def all_reduce_norm(module): """ All reduce norm statistics in different devices. """ states = get_async_norm_states(module) states = all_reduce(states, op="mean") module.load_state_dict(states, strict=False) ================================================ FILE: yolox/utils/box_ops.py ================================================ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ Utilities for bounding box manipulation and GIoU. """ import torch from torchvision.ops.boxes import box_area from yolox.utils.cluster_nms import giou_3d def box_cxcywh_to_xyxy(x): x_c, y_c, w, h = x.unbind(-1) b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] return torch.stack(b, dim=-1) def box_xyxy_to_cxcywh(x): x0, y0, x1, y1 = x.unbind(-1) b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] return torch.stack(b, dim=-1) # modified from torchvision to also return the union def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] wh = (rb - lt).clamp(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union def generalized_box_iou(boxes1,boxes2,boxes3,boxes4): """ Generalized IoU from https://giou.stanford.edu/ The boxes should be in [x0, y0, x1, y1] format Returns a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check # boxes1=boxes1.float() # boxes2=boxes2.float() # boxes3=boxes3.float() # boxes4=boxes4.float() assert (boxes1[:, 2:] >= boxes1[:, :2]).all() assert (boxes2[:, 2:] >= boxes2[:, :2]).all() assert (boxes3[:, 2:] >= boxes3[:, :2]).all() assert (boxes4[:, 2:] >= boxes4[:, :2]).all() # iou1, union1 = box_iou(boxes1, boxes3) # iou2, union2 = box_iou(boxes2, boxes4) # lt = torch.min(boxes1[:, None, :2], boxes3[:, :2]) # rb = torch.max(boxes1[:, None, 2:], boxes3[:, 2:]) # wh = (rb - lt).clamp(min=0) # [N,M,2] # area1 = wh[:, :, 0] * wh[:, :, 1] # lt = torch.min(boxes2[:, None, :2], boxes4[:, :2]) # rb = torch.max(boxes2[:, None, 2:], boxes4[:, 2:]) # wh = (rb - lt).clamp(min=0) # [N,M,2] # area2 = wh[:, :, 0] * wh[:, :, 1] # uiou=(iou1*union1+iou2*union2)/(union1+union2) # uunion=union1+union2 # uarea=area1+area2 # return uiou- (uarea - uunion) / uarea return giou_3d(boxes1,boxes3,boxes2,boxes4) def masks_to_boxes(masks): """Compute the bounding boxes around the provided masks The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. Returns a [N, 4] tensors, with the boxes in xyxy format """ if masks.numel() == 0: return torch.zeros((0, 4), device=masks.device) h, w = masks.shape[-2:] y = torch.arange(0, h, dtype=torch.float) x = torch.arange(0, w, dtype=torch.float) y, x = torch.meshgrid(y, x) x_mask = (masks * x.unsqueeze(0)) x_max = x_mask.flatten(1).max(-1)[0] x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] y_mask = (masks * y.unsqueeze(0)) y_max = y_mask.flatten(1).max(-1)[0] y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] return torch.stack([x_min, y_min, x_max, y_max], 1) # boxes = targets[:, :4].copy() # labels = targets[:, 4].copy() # ids = targets[:, 5].copy() # if len(boxes) == 0: # targets = np.zeros((self.max_labels, 6), dtype=np.float32) # image, r_o = preproc(image, input_dim, self.means, self.std) # image = np.ascontiguousarray(image, dtype=np.float32) # return image, targets # image_o = image.copy() # targets_o = targets.copy() # height_o, width_o, _ = image_o.shape # boxes_o = targets_o[:, :4] # labels_o = targets_o[:, 4] # ids_o = targets_o[:, 5] # # bbox_o: [xyxy] to [c_x,c_y,w,h] # boxes_o = xyxy2cxcywh(boxes_o) # image_t = _distort(image) # image_t, boxes_t ,image_r,boxes_r= _mirror(image_t, boxes) # height, width, _ = image_t.shape # image_t, r_t = preproc(image_t, input_dim, self.means, self.std) # image_t, r_r = preproc(image_r, input_dim, self.means, self.std) # # boxes [xyxy] 2 [cx,cy,w,h] # boxes_t = xyxy2cxcywh(boxes_t) # boxes_t *= r_t # boxes_r = xyxy2cxcywh(boxes_r) # boxes_r *= r_r # mask_b = np.minimum(boxes_t[:, 2], boxes_t[:, 3]) > 1 # boxes_t = boxes_t[mask_b] # boxes_r = boxes_r[mask_b] # labels_t = labels[mask_b] # ids_t = ids[mask_b] # if len(boxes_t) == 0: # image_t, r_o = preproc(image_o, input_dim, self.means, self.std) # boxes_o *= r_o # boxes_t = boxes_o # image_r=image_t # boxes_r=boxes_t # labels_t = labels_o # ids_t = ids_o # labels_t = np.expand_dims(labels_t, 1) # ids_t = np.expand_dims(ids_t, 1) # targets_t = np.hstack((labels_t, boxes_t, ids_t)) # padded_labels = np.zeros((self.max_labels, 6)) # padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[ # : self.max_labels # ] # targets_r = np.hstack((labels_t, boxes_r, ids_t)) # padded_labels_r = np.zeros((self.max_labels, 6)) # padded_labels_r[range(len(targets_r))[: self.max_labels]] = targets_r[ # : self.max_labels # ] # padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32) # image_t = np.ascontiguousarray(image_t, dtype=np.float32) # return image_t, padded_labels ================================================ FILE: yolox/utils/boxes.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import numpy as np import torch import torchvision import torch.nn.functional as F from .cluster_nms import cluster_nms __all__ = [ "filter_box", "postprocess", "diffusion_postprocess", "bboxes_iou", "matrix_iou", "adjust_box_anns", "xyxy2xywh", "xyxy2cxcywh", ] def filter_box(output, scale_range): """ output: (N, 5+class) shape """ min_scale, max_scale = scale_range w = output[:, 2] - output[:, 0] h = output[:, 3] - output[:, 1] keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale) return output[keep] def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45): box_corner = prediction.new(prediction.shape) box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2 box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2 box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2 box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2 prediction[:, :, :4] = box_corner[:, :, :4] output = [None for _ in range(len(prediction))] for i, image_pred in enumerate(prediction): # If none are remaining => process next image if not image_pred.size(0): continue # Get score and class with highest confidence class_conf, class_pred = torch.max( image_pred[:, 5 : 5 + num_classes], 1, keepdim=True ) conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze() # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000) # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred) detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1) detections = detections[conf_mask] if not detections.size(0): continue nms_out_index = torchvision.ops.batched_nms( detections[:, :4], detections[:, 4] * detections[:, 5], detections[:, 6], nms_thre, ) detections = detections[nms_out_index] if output[i] is None: output[i] = detections else: output[i] = torch.cat((output[i], detections)) return output def diffusion_postprocess(pre_prediction,cur_prediction,conf_scores,conf_thre=0.7,det_thre=0.65,nms_thre3d=0.7,nms_thre2d=0.75): output = [None for _ in range(len(pre_prediction))] for i,(pre_image_pred,cur_image_pred,asscociate_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)): asscociate_score=asscociate_score.flatten() # If none are remaining => process next image if not pre_image_pred.size(0): continue # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000) # Detections ordered as (x1, y1, x2, y2, association_conf, class_conf, class_pred) detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device) detections[0,:,:4]=pre_image_pred[:,:4] detections[1,:,:4]=cur_image_pred[:,:4] detections[0,:,4]=asscociate_score detections[1,:,4]=asscociate_score detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*asscociate_score) detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*asscociate_score) score_out_index=asscociate_score>conf_thre detections=detections[:,score_out_index,:] if not detections.size(1): continue nms_out_index_3d = cluster_nms(detections[0,:,:4], detections[1,:,:4], detections[0,:,4], iou_threshold=nms_thre3d) detections = detections[:,nms_out_index_3d,:] detections=torch.cat([detections[0],detections[1]],dim=0) class_score_out_index=detections[:,5]>det_thre detections=detections[class_score_out_index] nms_out_index_2d = torchvision.ops.batched_nms( detections[:,:4], detections[:,5], idxs=detections[:,6], iou_threshold=nms_thre2d) detections = detections[nms_out_index_2d] if output[i] is None: output[i] = detections else: output[i] = torch.cat((output[i], detections)) return output def bboxes_iou(bboxes_a, bboxes_b, xyxy=True): if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4: raise IndexError if xyxy: tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2]) br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:]) area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1) area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1) else: tl = torch.max( (bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2), (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2), ) br = torch.min( (bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2), (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2), ) area_a = torch.prod(bboxes_a[:, 2:], 1) area_b = torch.prod(bboxes_b[:, 2:], 1) en = (tl < br).type(tl.type()).prod(dim=2) area_i = torch.prod(br - tl, 2) * en # * ((tl < br).all()) return area_i / (area_a[:, None] + area_b - area_i) def matrix_iou(a, b): """ return iou of a and b, numpy version for data augenmentation """ lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) return area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-12) def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max): #bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max) #bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max) bbox[:, 0::2] = bbox[:, 0::2] * scale_ratio + padw bbox[:, 1::2] = bbox[:, 1::2] * scale_ratio + padh return bbox def xyxy2xywh(bboxes): bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0] bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1] return bboxes def xyxy2cxcywh(bboxes): bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0] bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1] bboxes[:, 0] = bboxes[:, 0] + bboxes[:, 2] * 0.5 bboxes[:, 1] = bboxes[:, 1] + bboxes[:, 3] * 0.5 return bboxes ================================================ FILE: yolox/utils/checkpoint.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from loguru import logger import torch import os import shutil def load_ckpt(model, ckpt): model_state_dict = model.state_dict() load_dict = {} for key_model, v in model_state_dict.items(): if key_model not in ckpt: logger.warning( "{} is not in the ckpt. Please double check and see if this is desired.".format( key_model ) ) continue v_ckpt = ckpt[key_model] if v.shape != v_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_model, v_ckpt.shape, key_model, v.shape ) ) continue load_dict[key_model] = v_ckpt model.load_state_dict(load_dict, strict=False) return model def save_checkpoint(state, is_best, save_dir, model_name=""): if not os.path.exists(save_dir): os.makedirs(save_dir) filename = os.path.join(save_dir, model_name + "_ckpt.pth.tar") torch.save(state, filename) if is_best: best_filename = os.path.join(save_dir, "best_ckpt.pth.tar") shutil.copyfile(filename, best_filename) ================================================ FILE: yolox/utils/cluster_nms.py ================================================ import torch @torch.jit.script def intersect(box_a, box_b): """ We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [n,A,4]. box_b: (tensor) bounding boxes, Shape: [n,B,4]. Return: (tensor) intersection area, Shape: [n,A,B]. """ n = box_a.size(0) A = box_a.size(1) B = box_b.size(1) max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2), box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2)) min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2), box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2)) return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter @torch.jit.script def garea(box_a, box_b): """ We resize both tensors to [A,B,2] without new malloc: [A,2] -> [A,1,2] -> [A,B,2] [B,2] -> [1,B,2] -> [A,B,2] Then we compute the area of intersect between box_a and box_b. Args: box_a: (tensor) bounding boxes, Shape: [n,A,4]. box_b: (tensor) bounding boxes, Shape: [n,B,4]. Return: (tensor) intersection area, Shape: [n,A,B]. """ n = box_a.size(0) A = box_a.size(1) B = box_b.size(1) max_xy = torch.max(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2), box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2)) min_xy = torch.min(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2), box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2)) return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter @torch.jit.script def get_box_area(box): return (box[:, :, 2]-box[:, :, 0]) *(box[:, :, 3]-box[:, :, 1]) def giou_3d(box_a,box_b,box_c,box_d): """Compute the jaccard overlap of two sets of boxes. The jaccard overlap is simply the intersection over union of two boxes. Here we operate on ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b. E.g.: A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) Args: box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] Return: jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] """ use_batch = True if box_a.dim() == 2: use_batch = False box_a = box_a[None, ...] box_b = box_b[None, ...] box_c = box_c[None, ...] box_d = box_d[None, ...] interab = intersect(box_a,box_b) intercd = intersect(box_c,box_d) area_ab= garea(box_a,box_b) area_cd=garea(box_c,box_d) area_a = get_box_area(box_a).unsqueeze(2).expand_as(interab) # [A,B] area_b = get_box_area(box_b).unsqueeze(1).expand_as(interab) # [A,B] area_c = get_box_area(box_c).unsqueeze(2).expand_as(intercd) # [A,B] area_d = get_box_area(box_d).unsqueeze(1).expand_as(intercd) # [A,B] unionab = area_a + area_b - interab unioncd = area_c+area_d-intercd uiouabcd = (interab+intercd) / (unionab+unioncd) out=uiouabcd-(area_ab+area_cd-unionab-unioncd)/(area_ab+area_cd) return out if use_batch else out.squeeze(0) def cluster_nms(boxes_a,boxes_c,scores,iou_threshold:float=0.5, top_k:int=500): # Collapse all the classes into 1 _, idx = scores.sort(0, descending=True) idx = idx[:top_k] boxes_a = boxes_a[idx] boxes_b = boxes_a boxes_c = boxes_c[idx] boxes_d = boxes_c iou = giou_3d(boxes_a,boxes_b,boxes_c,boxes_d).triu_(diagonal=1) B = iou for i in range(200): A=B maxA,_=torch.max(A, dim=0) E = (maxA<=iou_threshold).float().unsqueeze(1).expand_as(A) B=iou.mul(E) if A.equal(B)==True: break idx_out = idx[maxA <= iou_threshold] return idx_out # ## test # boxes_a=[[100,100,200,200], # [110,110,210,210], # [50,50,150,150], # [100,100,200,200], # [90,90,190,190],] # boxes_c=[[100,100,200,200], # [110,110,210,210], # [150,150,250,250], # [0,0,100,100], # [10,10,110,110],] # scores=[0.91,0.9,0.95,0.9,0.8] # boxes_a=torch.tensor(boxes_a,dtype=torch.float) # boxes_c=torch.tensor(boxes_c,dtype=torch.float) # scores=torch.tensor(scores,dtype=torch.float) # indix=cluster_nms(boxes_a,boxes_c,scores) # print(indix) ================================================ FILE: yolox/utils/demo_utils.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import numpy as np import os __all__ = ["mkdir", "nms", "multiclass_nms", "demo_postprocess"] def mkdir(path): if not os.path.exists(path): os.makedirs(path) def nms(boxes, scores, nms_thr): """Single class NMS implemented in Numpy.""" x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[order[1:]] - inter) inds = np.where(ovr <= nms_thr)[0] order = order[inds + 1] return keep def multiclass_nms(boxes, scores, nms_thr, score_thr): """Multiclass NMS implemented in Numpy""" final_dets = [] num_classes = scores.shape[1] for cls_ind in range(num_classes): cls_scores = scores[:, cls_ind] valid_score_mask = cls_scores > score_thr if valid_score_mask.sum() == 0: continue else: valid_scores = cls_scores[valid_score_mask] valid_boxes = boxes[valid_score_mask] keep = nms(valid_boxes, valid_scores, nms_thr) if len(keep) > 0: cls_inds = np.ones((len(keep), 1)) * cls_ind dets = np.concatenate( [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1 ) final_dets.append(dets) if len(final_dets) == 0: return None return np.concatenate(final_dets, 0) def demo_postprocess(outputs, img_size, p6=False): grids = [] expanded_strides = [] if not p6: strides = [8, 16, 32] else: strides = [8, 16, 32, 64] hsizes = [img_size[0] // stride for stride in strides] wsizes = [img_size[1] // stride for stride in strides] for hsize, wsize, stride in zip(hsizes, wsizes, strides): xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) grid = np.stack((xv, yv), 2).reshape(1, -1, 2) grids.append(grid) shape = grid.shape[:2] expanded_strides.append(np.full((*shape, 1), stride)) grids = np.concatenate(grids, 1) expanded_strides = np.concatenate(expanded_strides, 1) outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides return outputs ================================================ FILE: yolox/utils/dist.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # This file mainly comes from # https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. """ This file contains primitives for multi-gpu communication. This is useful when doing distributed training. """ import numpy as np import torch from torch import distributed as dist import functools import logging import pickle import time __all__ = [ "is_main_process", "synchronize", "get_world_size", "get_rank", "get_local_rank", "get_local_size", "time_synchronized", "gather", "all_gather", ] _LOCAL_PROCESS_GROUP = None def synchronize(): """ Helper function to synchronize (barrier) among all processes when using distributed training """ if not dist.is_available(): return if not dist.is_initialized(): return world_size = dist.get_world_size() if world_size == 1: return dist.barrier() def get_world_size() -> int: if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size() def get_rank() -> int: if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 return dist.get_rank() def get_local_rank() -> int: """ Returns: The rank of the current process within the local (per-machine) process group. """ if not dist.is_available(): return 0 if not dist.is_initialized(): return 0 assert _LOCAL_PROCESS_GROUP is not None return dist.get_rank(group=_LOCAL_PROCESS_GROUP) def get_local_size() -> int: """ Returns: The size of the per-machine process group, i.e. the number of processes per machine. """ if not dist.is_available(): return 1 if not dist.is_initialized(): return 1 return dist.get_world_size(group=_LOCAL_PROCESS_GROUP) def is_main_process() -> bool: return get_rank() == 0 @functools.lru_cache() def _get_global_gloo_group(): """ Return a process group based on gloo backend, containing all the ranks The result is cached. """ if dist.get_backend() == "nccl": return dist.new_group(backend="gloo") else: return dist.group.WORLD def _serialize_to_tensor(data, group): backend = dist.get_backend(group) assert backend in ["gloo", "nccl"] device = torch.device("cpu" if backend == "gloo" else "cuda") buffer = pickle.dumps(data) if len(buffer) > 1024 ** 3: logger = logging.getLogger(__name__) logger.warning( "Rank {} trying to all-gather {:.2f} GB of data on device {}".format( get_rank(), len(buffer) / (1024 ** 3), device ) ) storage = torch.ByteStorage.from_buffer(buffer) tensor = torch.ByteTensor(storage).to(device=device) return tensor def _pad_to_largest_tensor(tensor, group): """ Returns: list[int]: size of the tensor, on each rank Tensor: padded tensor that has the max size """ world_size = dist.get_world_size(group=group) assert ( world_size >= 1 ), "comm.gather/all_gather must be called from ranks within the given group!" local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device) size_list = [ torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size) ] dist.all_gather(size_list, local_size, group=group) size_list = [int(size.item()) for size in size_list] max_size = max(size_list) # we pad the tensor because torch all_gather does not support # gathering tensors of different shapes if local_size != max_size: padding = torch.zeros( (max_size - local_size,), dtype=torch.uint8, device=tensor.device ) tensor = torch.cat((tensor, padding), dim=0) return size_list, tensor def all_gather(data, group=None): """ Run all_gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: list of data gathered from each rank """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() if dist.get_world_size(group) == 1: return [data] tensor = _serialize_to_tensor(data, group) size_list, tensor = _pad_to_largest_tensor(tensor, group) max_size = max(size_list) # receiving Tensor from all ranks tensor_list = [ torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list ] dist.all_gather(tensor_list, tensor, group=group) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list def gather(data, dst=0, group=None): """ Run gather on arbitrary picklable data (not necessarily tensors). Args: data: any picklable object dst (int): destination rank group: a torch process group. By default, will use a group which contains all ranks on gloo backend. Returns: list[data]: on dst, a list of data gathered from each rank. Otherwise, an empty list. """ if get_world_size() == 1: return [data] if group is None: group = _get_global_gloo_group() if dist.get_world_size(group=group) == 1: return [data] rank = dist.get_rank(group=group) tensor = _serialize_to_tensor(data, group) size_list, tensor = _pad_to_largest_tensor(tensor, group) # receiving Tensor from all ranks if rank == dst: max_size = max(size_list) tensor_list = [ torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list ] dist.gather(tensor, tensor_list, dst=dst, group=group) data_list = [] for size, tensor in zip(size_list, tensor_list): buffer = tensor.cpu().numpy().tobytes()[:size] data_list.append(pickle.loads(buffer)) return data_list else: dist.gather(tensor, [], dst=dst, group=group) return [] def shared_random_seed(): """ Returns: int: a random number that is the same across all workers. If workers need a shared RNG, they can use this shared seed to create one. All workers must call this function, otherwise it will deadlock. """ ints = np.random.randint(2 ** 31) all_ints = all_gather(ints) return all_ints[0] def time_synchronized(): """pytorch-accurate time""" if torch.cuda.is_available(): torch.cuda.synchronize() return time.time() def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True ================================================ FILE: yolox/utils/ema.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn import math from copy import deepcopy def is_parallel(model): """check if model is in parallel mode.""" parallel_type = ( nn.parallel.DataParallel, nn.parallel.DistributedDataParallel, ) return isinstance(model, parallel_type) def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): if (len(include) and k not in include) or k.startswith("_") or k in exclude: continue else: setattr(a, k, v) class ModelEMA: """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models Keep a moving average of everything in the model state_dict (parameters and buffers). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage A smoothed version of the weights is necessary for some training schemes to perform well. This class is sensitive where it is initialized in the sequence of model init, GPU assignment and distributed training wrappers. """ def __init__(self, model, decay=0.9999, updates=0): """ Args: model (nn.Module): model to apply EMA. decay (float): ema decay reate. updates (int): counter of EMA updates. """ # Create EMA(FP32) self.ema = deepcopy(model.module if is_parallel(model) else model).eval() self.updates = updates # decay exponential ramp (to help early epochs) self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) for p in self.ema.parameters(): p.requires_grad_(False) def update(self, model): # Update EMA parameters with torch.no_grad(): self.updates += 1 d = self.decay(self.updates) msd = ( model.module.state_dict() if is_parallel(model) else model.state_dict() ) # model state_dict for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d v += (1.0 - d) * msd[k].detach() def update_attr(self, model, include=(), exclude=("process_group", "reducer")): # Update EMA attributes copy_attr(self.ema, model, include, exclude) ================================================ FILE: yolox/utils/logger.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. from loguru import logger import inspect import os import sys def get_caller_name(depth=0): """ Args: depth (int): Depth of caller conext, use 0 for caller depth. Default value: 0. Returns: str: module name of the caller """ # the following logic is a little bit faster than inspect.stack() logic frame = inspect.currentframe().f_back for _ in range(depth): frame = frame.f_back return frame.f_globals["__name__"] class StreamToLoguru: """ stream object that redirects writes to a logger instance. """ def __init__(self, level="INFO", caller_names=("apex", "pycocotools")): """ Args: level(str): log level string of loguru. Default value: "INFO". caller_names(tuple): caller names of redirected module. Default value: (apex, pycocotools). """ self.level = level self.linebuf = "" self.caller_names = caller_names def write(self, buf): full_name = get_caller_name(depth=1) module_name = full_name.rsplit(".", maxsplit=-1)[0] if module_name in self.caller_names: for line in buf.rstrip().splitlines(): # use caller level log logger.opt(depth=2).log(self.level, line.rstrip()) else: sys.__stdout__.write(buf) def flush(self): pass def redirect_sys_output(log_level="INFO"): redirect_logger = StreamToLoguru(log_level) sys.stderr = redirect_logger sys.stdout = redirect_logger def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode="a"): """setup logger for training and testing. Args: save_dir(str): location to save log file distributed_rank(int): device rank when multi-gpu environment filename (string): log save name. mode(str): log file write mode, `append` or `override`. default is `a`. Return: logger instance. """ loguru_format = ( "{time:YYYY-MM-DD HH:mm:ss} | " "{level: <8} | " "{name}:{line} - {message}" ) logger.remove() save_file = os.path.join(save_dir, filename) if mode == "o" and os.path.exists(save_file): os.remove(save_file) # only keep logger in rank0 process if distributed_rank == 0: logger.add( sys.stderr, format=loguru_format, level="INFO", enqueue=True, ) logger.add(save_file) # redirect stdout/stderr to loguru redirect_sys_output("INFO") ================================================ FILE: yolox/utils/lr_scheduler.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import math from functools import partial class LRScheduler: def __init__(self, name, lr, iters_per_epoch, total_epochs, **kwargs): """ Supported lr schedulers: [cos, warmcos, multistep] Args: lr (float): learning rate. iters_per_peoch (int): number of iterations in one epoch. total_epochs (int): number of epochs in training. kwargs (dict): - cos: None - warmcos: [warmup_epochs, warmup_lr_start (default 1e-6)] - multistep: [milestones (epochs), gamma (default 0.1)] """ self.lr = lr self.iters_per_epoch = iters_per_epoch self.total_epochs = total_epochs self.total_iters = iters_per_epoch * total_epochs self.__dict__.update(kwargs) self.lr_func = self._get_lr_func(name) def update_lr(self, iters): return self.lr_func(iters) def _get_lr_func(self, name): if name == "cos": # cosine lr schedule lr_func = partial(cos_lr, self.lr, self.total_iters) elif name == "warmcos": warmup_total_iters = self.iters_per_epoch * self.warmup_epochs warmup_lr_start = getattr(self, "warmup_lr_start", 1e-6) lr_func = partial( warm_cos_lr, self.lr, self.total_iters, warmup_total_iters, warmup_lr_start, ) elif name == "yoloxwarmcos": warmup_total_iters = self.iters_per_epoch * self.warmup_epochs no_aug_iters = self.iters_per_epoch * self.no_aug_epochs warmup_lr_start = getattr(self, "warmup_lr_start", 0) min_lr_ratio = getattr(self, "min_lr_ratio", 0.2) lr_func = partial( yolox_warm_cos_lr, self.lr, min_lr_ratio, self.total_iters, warmup_total_iters, warmup_lr_start, no_aug_iters, ) elif name == "yoloxsemiwarmcos": warmup_lr_start = getattr(self, "warmup_lr_start", 0) min_lr_ratio = getattr(self, "min_lr_ratio", 0.2) warmup_total_iters = self.iters_per_epoch * self.warmup_epochs no_aug_iters = self.iters_per_epoch * self.no_aug_epochs normal_iters = self.iters_per_epoch * self.semi_epoch semi_iters = self.iters_per_epoch_semi * ( self.total_epochs - self.semi_epoch - self.no_aug_epochs ) lr_func = partial( yolox_semi_warm_cos_lr, self.lr, min_lr_ratio, warmup_lr_start, self.total_iters, normal_iters, no_aug_iters, warmup_total_iters, semi_iters, self.iters_per_epoch, self.iters_per_epoch_semi, ) elif name == "multistep": # stepwise lr schedule milestones = [ int(self.total_iters * milestone / self.total_epochs) for milestone in self.milestones ] gamma = getattr(self, "gamma", 0.1) lr_func = partial(multistep_lr, self.lr, milestones, gamma) else: raise ValueError("Scheduler version {} not supported.".format(name)) return lr_func def cos_lr(lr, total_iters, iters): """Cosine learning rate""" lr *= 0.5 * (1.0 + math.cos(math.pi * iters / total_iters)) return lr def warm_cos_lr(lr, total_iters, warmup_total_iters, warmup_lr_start, iters): """Cosine learning rate with warm up.""" if iters <= warmup_total_iters: lr = (lr - warmup_lr_start) * iters / float( warmup_total_iters ) + warmup_lr_start else: lr *= 0.5 * ( 1.0 + math.cos( math.pi * (iters - warmup_total_iters) / (total_iters - warmup_total_iters) ) ) return lr def yolox_warm_cos_lr( lr, min_lr_ratio, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter, iters, ): """Cosine learning rate with warm up.""" min_lr = lr * min_lr_ratio if iters <= warmup_total_iters: # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start lr = (lr - warmup_lr_start) * pow( iters / float(warmup_total_iters), 2 ) + warmup_lr_start elif iters >= total_iters - no_aug_iter: lr = min_lr else: lr = min_lr + 0.5 * (lr - min_lr) * ( 1.0 + math.cos( math.pi * (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iter) ) ) return lr def yolox_semi_warm_cos_lr( lr, min_lr_ratio, warmup_lr_start, total_iters, normal_iters, no_aug_iters, warmup_total_iters, semi_iters, iters_per_epoch, iters_per_epoch_semi, iters, ): """Cosine learning rate with warm up.""" min_lr = lr * min_lr_ratio if iters <= warmup_total_iters: # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start lr = (lr - warmup_lr_start) * pow( iters / float(warmup_total_iters), 2 ) + warmup_lr_start elif iters >= normal_iters + semi_iters: lr = min_lr elif iters <= normal_iters: lr = min_lr + 0.5 * (lr - min_lr) * ( 1.0 + math.cos( math.pi * (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iters) ) ) else: lr = min_lr + 0.5 * (lr - min_lr) * ( 1.0 + math.cos( math.pi * ( normal_iters - warmup_total_iters + (iters - normal_iters) * iters_per_epoch * 1.0 / iters_per_epoch_semi ) / (total_iters - warmup_total_iters - no_aug_iters) ) ) return lr def multistep_lr(lr, milestones, gamma, iters): """MultiStep learning rate""" for milestone in milestones: lr *= gamma if iters >= milestone else 1.0 return lr ================================================ FILE: yolox/utils/metric.py ================================================ #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import numpy as np import torch import functools import os import time from collections import defaultdict, deque __all__ = [ "AverageMeter", "MeterBuffer", "get_total_and_free_memory_in_Mb", "occupy_mem", "gpu_mem_usage", ] def get_total_and_free_memory_in_Mb(cuda_device): devices_info_str = os.popen( "nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader" ) devices_info = devices_info_str.read().strip().split("\n") total, used = devices_info[int(cuda_device)].split(",") return int(total), int(used) def occupy_mem(cuda_device, mem_ratio=0.95): """ pre-allocate gpu memory for training to avoid memory Fragmentation. """ total, used = get_total_and_free_memory_in_Mb(cuda_device) max_mem = int(total * mem_ratio) block_mem = max_mem - used x = torch.cuda.FloatTensor(256, 1024, block_mem) del x time.sleep(5) def gpu_mem_usage(): """ Compute the GPU memory usage for the current device (MB). """ mem_usage_bytes = torch.cuda.max_memory_allocated() return mem_usage_bytes / (1024 * 1024) class AverageMeter: """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=50): self._deque = deque(maxlen=window_size) self._total = 0.0 self._count = 0 def update(self, value): self._deque.append(value) self._count += 1 self._total += value @property def median(self): d = np.array(list(self._deque)) return np.median(d) @property def avg(self): # if deque is empty, nan will be returned. d = np.array(list(self._deque)) return d.mean() @property def global_avg(self): return self._total / max(self._count, 1e-5) @property def latest(self): return self._deque[-1] if len(self._deque) > 0 else None @property def total(self): return self._total def reset(self): self._deque.clear() self._total = 0.0 self._count = 0 def clear(self): self._deque.clear() class MeterBuffer(defaultdict): """Computes and stores the average and current value""" def __init__(self, window_size=20): factory = functools.partial(AverageMeter, window_size=window_size) super().__init__(factory) def reset(self): for v in self.values(): v.reset() def get_filtered_meter(self, filter_key="time"): return {k: v for k, v in self.items() if filter_key in k} def update(self, values=None, **kwargs): if values is None: values = {} values.update(kwargs) for k, v in values.items(): if isinstance(v, torch.Tensor): v = v.detach() self[k].update(v) def clear_meters(self): for v in self.values(): v.clear() ================================================ FILE: yolox/utils/model_utils.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import torch import torch.nn as nn from thop import profile from copy import deepcopy __all__ = [ "fuse_conv_and_bn", "fuse_model", "get_model_info", "replace_module", ] def get_model_info(model, tsize): stride = 64 img = torch.zeros((2, 3, stride, stride), device=next(model.parameters()).device) flops, params = profile(deepcopy(model), inputs=(img.split(1,dim=0),), verbose=False) params /= 1e6 flops /= 1e9 flops *= tsize[0] * tsize[1] / stride / stride * 2 # Gflops info = "Params: {:.2f}M, Gflops: {:.2f}".format(params, flops) return info def fuse_conv_and_bn(conv, bn): # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ fusedconv = ( nn.Conv2d( conv.in_channels, conv.out_channels, kernel_size=conv.kernel_size, stride=conv.stride, padding=conv.padding, groups=conv.groups, bias=True, ) .requires_grad_(False) .to(conv.weight.device) ) # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) # prepare spatial bias b_conv = ( torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias ) b_bn = bn.bias - bn.weight.mul(bn.running_mean).div( torch.sqrt(bn.running_var + bn.eps) ) fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) return fusedconv def fuse_model(model): from yolox.models.network_blocks import BaseConv for m in model.modules(): if type(m) is BaseConv and hasattr(m, "bn"): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, "bn") # remove batchnorm m.forward = m.fuseforward # update forward return model def replace_module(module, replaced_module_type, new_module_type, replace_func=None): """ Replace given type in module to a new type. mostly used in deploy. Args: module (nn.Module): model to apply replace operation. replaced_module_type (Type): module type to be replaced. new_module_type (Type) replace_func (function): python function to describe replace logic. Defalut value None. Returns: model (nn.Module): module that already been replaced. """ def default_replace_func(replaced_module_type, new_module_type): return new_module_type() if replace_func is None: replace_func = default_replace_func model = module if isinstance(module, replaced_module_type): model = replace_func(replaced_module_type, new_module_type) else: # recurrsively replace for name, child in module.named_children(): new_child = replace_module(child, replaced_module_type, new_module_type) if new_child is not child: # child is already replaced model.add_module(name, new_child) return model ================================================ FILE: yolox/utils/setup_env.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import cv2 import os import subprocess __all__ = ["configure_nccl", "configure_module"] def configure_nccl(): """Configure multi-machine environment variables of NCCL.""" os.environ["NCCL_LAUNCH_MODE"] = "PARALLEL" os.environ["NCCL_IB_HCA"] = subprocess.getoutput( "pushd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; " "do cat $i/ports/1/gid_attrs/types/* 2>/dev/null " "| grep v >/dev/null && echo $i ; done; popd > /dev/null" ) os.environ["NCCL_IB_GID_INDEX"] = "3" os.environ["NCCL_IB_TC"] = "106" def configure_module(ulimit_value=8192): """ Configure pytorch module environment. setting of ulimit and cv2 will be set. Args: ulimit_value(int): default open file number on linux. Default value: 8192. """ # system setting try: import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) resource.setrlimit(resource.RLIMIT_NOFILE, (ulimit_value, rlimit[1])) except Exception: # Exception might be raised in Windows OS or rlimit reaches max limit number. # However, set rlimit value might not be necessary. pass # cv2 # multiprocess might be harmful on performance of torch dataloader os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled" try: cv2.setNumThreads(0) cv2.ocl.setUseOpenCL(False) except Exception: # cv2 version mismatch might rasie exceptions. pass ================================================ FILE: yolox/utils/visualize.py ================================================ #!/usr/bin/env python3 # -*- coding:utf-8 -*- # Copyright (c) 2014-2021 Megvii Inc. All rights reserved. import cv2 import numpy as np __all__ = ["vis"] def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None): for i in range(len(boxes)): box = boxes[i] cls_id = int(cls_ids[i]) score = scores[i] if score < conf: continue x0 = int(box[0]) y0 = int(box[1]) x1 = int(box[2]) y1 = int(box[3]) color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist() text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100) txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255) font = cv2.FONT_HERSHEY_SIMPLEX txt_size = cv2.getTextSize(text, font, 0.4, 1)[0] cv2.rectangle(img, (x0, y0), (x1, y1), color, 2) txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist() cv2.rectangle( img, (x0, y0 + 1), (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])), txt_bk_color, -1 ) cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1) return img def get_color(idx): idx = idx * 3 color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255) return color def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None): im = np.ascontiguousarray(np.copy(image)) im_h, im_w = im.shape[:2] top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255 #text_scale = max(1, image.shape[1] / 1600.) #text_thickness = 2 #line_thickness = max(1, int(image.shape[1] / 500.)) text_scale = 2 text_thickness = 2 line_thickness = 3 radius = max(5, int(im_w/140.)) cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)), (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=2) for i, tlwh in enumerate(tlwhs): x1, y1, w, h = tlwh intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h))) obj_id = int(obj_ids[i]) id_text = '{}'.format(int(obj_id)) if ids2 is not None: id_text = id_text + ', {}'.format(int(ids2[i])) color = get_color(abs(obj_id)) cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness) cv2.putText(im, id_text, (intbox[0], intbox[1]), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255), thickness=text_thickness) return im _COLORS = np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.286, 0.286, 0.286, 0.429, 0.429, 0.429, 0.571, 0.571, 0.571, 0.714, 0.714, 0.714, 0.857, 0.857, 0.857, 0.000, 0.447, 0.741, 0.314, 0.717, 0.741, 0.50, 0.5, 0 ] ).astype(np.float32).reshape(-1, 3)