[
  {
    "path": ".gitattributes",
    "content": "README.assets/MOT20.gif filter=lfs diff=lfs merge=lfs -text\nREADME.assets/dancetrack.gif filter=lfs diff=lfs merge=lfs -text\n"
  },
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\ndatasets/*\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# output\ndocs/api\n.code-workspace.code-workspace\n*.pkl\n*.npy\n*.pth\n*.onnx\n*.engine\nevents.out.tfevents*\npretrained\n*_outputs/\nDiffusionTrack_*/\ndatasets/\n*.pth.tar\n*.tar.gz\nsrc/*\ntest.py\nid_rsa_cs\nmodule_test.py\nvis_fold"
  },
  {
    "path": "LICENSE",
    "content": "\nAttribution-NonCommercial 4.0 International\n\n=======================================================================\n\nCreative Commons Corporation (\"Creative Commons\") is not a law firm and\ndoes not provide legal services or legal advice. Distribution of\nCreative Commons public licenses does not create a lawyer-client or\nother relationship. Creative Commons makes its licenses and related\ninformation available on an \"as-is\" basis. Creative Commons gives no\nwarranties regarding its licenses, any material licensed under their\nterms and conditions, or any related information. Creative Commons\ndisclaims all liability for damages resulting from their use to the\nfullest extent possible.\n\nUsing Creative Commons Public Licenses\n\nCreative Commons public licenses provide a standard set of terms and\nconditions that creators and other rights holders may use to share\noriginal works of authorship and other material subject to copyright\nand certain other rights specified in the public license below. The\nfollowing considerations are for informational purposes only, are not\nexhaustive, and do not form part of our licenses.\n\n     Considerations for licensors: Our public licenses are\n     intended for use by those authorized to give the public\n     permission to use material in ways otherwise restricted by\n     copyright and certain other rights. Our licenses are\n     irrevocable. Licensors should read and understand the terms\n     and conditions of the license they choose before applying it.\n     Licensors should also secure all rights necessary before\n     applying our licenses so that the public can reuse the\n     material as expected. Licensors should clearly mark any\n     material not subject to the license. This includes other CC-\n     licensed material, or material used under an exception or\n     limitation to copyright. More considerations for licensors:\n   wiki.creativecommons.org/Considerations_for_licensors\n\n     Considerations for the public: By using one of our public\n     licenses, a licensor grants the public permission to use the\n     licensed material under specified terms and conditions. If\n     the licensor's permission is not necessary for any reason--for\n     example, because of any applicable exception or limitation to\n     copyright--then that use is not regulated by the license. Our\n     licenses grant only permissions under copyright and certain\n     other rights that a licensor has authority to grant. Use of\n     the licensed material may still be restricted for other\n     reasons, including because others have copyright or other\n     rights in the material. A licensor may make special requests,\n     such as asking that all changes be marked or described.\n     Although not required by our licenses, you are encouraged to\n     respect those requests where reasonable. More_considerations\n     for the public: \n   wiki.creativecommons.org/Considerations_for_licensees\n\n=======================================================================\n\nCreative Commons Attribution-NonCommercial 4.0 International Public\nLicense\n\nBy exercising the Licensed Rights (defined below), You accept and agree\nto be bound by the terms and conditions of this Creative Commons\nAttribution-NonCommercial 4.0 International Public License (\"Public\nLicense\"). 
To the extent this Public License may be interpreted as a\ncontract, You are granted the Licensed Rights in consideration of Your\nacceptance of these terms and conditions, and the Licensor grants You\nsuch rights in consideration of benefits the Licensor receives from\nmaking the Licensed Material available under these terms and\nconditions.\n\nSection 1 -- Definitions.\n\n  a. Adapted Material means material subject to Copyright and Similar\n     Rights that is derived from or based upon the Licensed Material\n     and in which the Licensed Material is translated, altered,\n     arranged, transformed, or otherwise modified in a manner requiring\n     permission under the Copyright and Similar Rights held by the\n     Licensor. For purposes of this Public License, where the Licensed\n     Material is a musical work, performance, or sound recording,\n     Adapted Material is always produced where the Licensed Material is\n     synched in timed relation with a moving image.\n\n  b. Adapter's License means the license You apply to Your Copyright\n     and Similar Rights in Your contributions to Adapted Material in\n     accordance with the terms and conditions of this Public License.\n\n  c. Copyright and Similar Rights means copyright and/or similar rights\n     closely related to copyright including, without limitation,\n     performance, broadcast, sound recording, and Sui Generis Database\n     Rights, without regard to how the rights are labeled or\n     categorized. For purposes of this Public License, the rights\n     specified in Section 2(b)(1)-(2) are not Copyright and Similar\n     Rights.\n  d. Effective Technological Measures means those measures that, in the\n     absence of proper authority, may not be circumvented under laws\n     fulfilling obligations under Article 11 of the WIPO Copyright\n     Treaty adopted on December 20, 1996, and/or similar international\n     agreements.\n\n  e. Exceptions and Limitations means fair use, fair dealing, and/or\n     any other exception or limitation to Copyright and Similar Rights\n     that applies to Your use of the Licensed Material.\n\n  f. Licensed Material means the artistic or literary work, database,\n     or other material to which the Licensor applied this Public\n     License.\n\n  g. Licensed Rights means the rights granted to You subject to the\n     terms and conditions of this Public License, which are limited to\n     all Copyright and Similar Rights that apply to Your use of the\n     Licensed Material and that the Licensor has authority to license.\n\n  h. Licensor means the individual(s) or entity(ies) granting rights\n     under this Public License.\n\n  i. NonCommercial means not primarily intended for or directed towards\n     commercial advantage or monetary compensation. For purposes of\n     this Public License, the exchange of the Licensed Material for\n     other material subject to Copyright and Similar Rights by digital\n     file-sharing or similar means is NonCommercial provided there is\n     no payment of monetary compensation in connection with the\n     exchange.\n\n  j. 
Share means to provide material to the public by any means or\n     process that requires permission under the Licensed Rights, such\n     as reproduction, public display, public performance, distribution,\n     dissemination, communication, or importation, and to make material\n     available to the public including in ways that members of the\n     public may access the material from a place and at a time\n     individually chosen by them.\n\n  k. Sui Generis Database Rights means rights other than copyright\n     resulting from Directive 96/9/EC of the European Parliament and of\n     the Council of 11 March 1996 on the legal protection of databases,\n     as amended and/or succeeded, as well as other essentially\n     equivalent rights anywhere in the world.\n\n  l. You means the individual or entity exercising the Licensed Rights\n     under this Public License. Your has a corresponding meaning.\n\nSection 2 -- Scope.\n\n  a. License grant.\n\n       1. Subject to the terms and conditions of this Public License,\n          the Licensor hereby grants You a worldwide, royalty-free,\n          non-sublicensable, non-exclusive, irrevocable license to\n          exercise the Licensed Rights in the Licensed Material to:\n\n            a. reproduce and Share the Licensed Material, in whole or\n               in part, for NonCommercial purposes only; and\n\n            b. produce, reproduce, and Share Adapted Material for\n               NonCommercial purposes only.\n\n       2. Exceptions and Limitations. For the avoidance of doubt, where\n          Exceptions and Limitations apply to Your use, this Public\n          License does not apply, and You do not need to comply with\n          its terms and conditions.\n\n       3. Term. The term of this Public License is specified in Section\n          6(a).\n\n       4. Media and formats; technical modifications allowed. The\n          Licensor authorizes You to exercise the Licensed Rights in\n          all media and formats whether now known or hereafter created,\n          and to make technical modifications necessary to do so. The\n          Licensor waives and/or agrees not to assert any right or\n          authority to forbid You from making technical modifications\n          necessary to exercise the Licensed Rights, including\n          technical modifications necessary to circumvent Effective\n          Technological Measures. For purposes of this Public License,\n          simply making modifications authorized by this Section 2(a)\n          (4) never produces Adapted Material.\n\n       5. Downstream recipients.\n\n            a. Offer from the Licensor -- Licensed Material. Every\n               recipient of the Licensed Material automatically\n               receives an offer from the Licensor to exercise the\n               Licensed Rights under the terms and conditions of this\n               Public License.\n\n            b. No downstream restrictions. You may not offer or impose\n               any additional or different terms or conditions on, or\n               apply any Effective Technological Measures to, the\n               Licensed Material if doing so restricts exercise of the\n               Licensed Rights by any recipient of the Licensed\n               Material.\n\n       6. No endorsement. 
Nothing in this Public License constitutes or\n          may be construed as permission to assert or imply that You\n          are, or that Your use of the Licensed Material is, connected\n          with, or sponsored, endorsed, or granted official status by,\n          the Licensor or others designated to receive attribution as\n          provided in Section 3(a)(1)(A)(i).\n\n  b. Other rights.\n\n       1. Moral rights, such as the right of integrity, are not\n          licensed under this Public License, nor are publicity,\n          privacy, and/or other similar personality rights; however, to\n          the extent possible, the Licensor waives and/or agrees not to\n          assert any such rights held by the Licensor to the limited\n          extent necessary to allow You to exercise the Licensed\n          Rights, but not otherwise.\n\n       2. Patent and trademark rights are not licensed under this\n          Public License.\n\n       3. To the extent possible, the Licensor waives any right to\n          collect royalties from You for the exercise of the Licensed\n          Rights, whether directly or through a collecting society\n          under any voluntary or waivable statutory or compulsory\n          licensing scheme. In all other cases the Licensor expressly\n          reserves any right to collect such royalties, including when\n          the Licensed Material is used other than for NonCommercial\n          purposes.\n\nSection 3 -- License Conditions.\n\nYour exercise of the Licensed Rights is expressly made subject to the\nfollowing conditions.\n\n  a. Attribution.\n\n       1. If You Share the Licensed Material (including in modified\n          form), You must:\n\n            a. retain the following if it is supplied by the Licensor\n               with the Licensed Material:\n\n                 i. identification of the creator(s) of the Licensed\n                    Material and any others designated to receive\n                    attribution, in any reasonable manner requested by\n                    the Licensor (including by pseudonym if\n                    designated);\n\n                ii. a copyright notice;\n\n               iii. a notice that refers to this Public License;\n\n                iv. a notice that refers to the disclaimer of\n                    warranties;\n\n                 v. a URI or hyperlink to the Licensed Material to the\n                    extent reasonably practicable;\n\n            b. indicate if You modified the Licensed Material and\n               retain an indication of any previous modifications; and\n\n            c. indicate the Licensed Material is licensed under this\n               Public License, and include the text of, or the URI or\n               hyperlink to, this Public License.\n\n       2. You may satisfy the conditions in Section 3(a)(1) in any\n          reasonable manner based on the medium, means, and context in\n          which You Share the Licensed Material. For example, it may be\n          reasonable to satisfy the conditions by providing a URI or\n          hyperlink to a resource that includes the required\n          information.\n\n       3. If requested by the Licensor, You must remove any of the\n          information required by Section 3(a)(1)(A) to the extent\n          reasonably practicable.\n\n       4. 
If You Share Adapted Material You produce, the Adapter's\n          License You apply must not prevent recipients of the Adapted\n          Material from complying with this Public License.\n\nSection 4 -- Sui Generis Database Rights.\n\nWhere the Licensed Rights include Sui Generis Database Rights that\napply to Your use of the Licensed Material:\n\n  a. for the avoidance of doubt, Section 2(a)(1) grants You the right\n     to extract, reuse, reproduce, and Share all or a substantial\n     portion of the contents of the database for NonCommercial purposes\n     only;\n\n  b. if You include all or a substantial portion of the database\n     contents in a database in which You have Sui Generis Database\n     Rights, then the database in which You have Sui Generis Database\n     Rights (but not its individual contents) is Adapted Material; and\n\n  c. You must comply with the conditions in Section 3(a) if You Share\n     all or a substantial portion of the contents of the database.\n\nFor the avoidance of doubt, this Section 4 supplements and does not\nreplace Your obligations under this Public License where the Licensed\nRights include other Copyright and Similar Rights.\n\nSection 5 -- Disclaimer of Warranties and Limitation of Liability.\n\n  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE\n     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS\n     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF\n     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,\n     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,\n     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR\n     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,\n     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT\n     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT\n     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.\n\n  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE\n     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,\n     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,\n     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,\n     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR\n     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN\n     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR\n     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR\n     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.\n\n  c. The disclaimer of warranties and limitation of liability provided\n     above shall be interpreted in a manner that, to the extent\n     possible, most closely approximates an absolute disclaimer and\n     waiver of all liability.\n\nSection 6 -- Term and Termination.\n\n  a. This Public License applies for the term of the Copyright and\n     Similar Rights licensed here. However, if You fail to comply with\n     this Public License, then Your rights under this Public License\n     terminate automatically.\n\n  b. Where Your right to use the Licensed Material has terminated under\n     Section 6(a), it reinstates:\n\n       1. automatically as of the date the violation is cured, provided\n          it is cured within 30 days of Your discovery of the\n          violation; or\n\n       2. 
upon express reinstatement by the Licensor.\n\n     For the avoidance of doubt, this Section 6(b) does not affect any\n     right the Licensor may have to seek remedies for Your violations\n     of this Public License.\n\n  c. For the avoidance of doubt, the Licensor may also offer the\n     Licensed Material under separate terms or conditions or stop\n     distributing the Licensed Material at any time; however, doing so\n     will not terminate this Public License.\n\n  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public\n     License.\n\nSection 7 -- Other Terms and Conditions.\n\n  a. The Licensor shall not be bound by any additional or different\n     terms or conditions communicated by You unless expressly agreed.\n\n  b. Any arrangements, understandings, or agreements regarding the\n     Licensed Material not stated herein are separate from and\n     independent of the terms and conditions of this Public License.\n\nSection 8 -- Interpretation.\n\n  a. For the avoidance of doubt, this Public License does not, and\n     shall not be interpreted to, reduce, limit, restrict, or impose\n     conditions on any use of the Licensed Material that could lawfully\n     be made without permission under this Public License.\n\n  b. To the extent possible, if any provision of this Public License is\n     deemed unenforceable, it shall be automatically reformed to the\n     minimum extent necessary to make it enforceable. If the provision\n     cannot be reformed, it shall be severed from this Public License\n     without affecting the enforceability of the remaining terms and\n     conditions.\n\n  c. No term or condition of this Public License will be waived and no\n     failure to comply consented to unless expressly agreed to by the\n     Licensor.\n\n  d. Nothing in this Public License constitutes or may be interpreted\n     as a limitation upon, or waiver of, any privileges and immunities\n     that apply to the Licensor or You, including from the legal\n     processes of any jurisdiction or authority.\n\n=======================================================================\n\nCreative Commons is not a party to its public\nlicenses. Notwithstanding, Creative Commons may elect to apply one of\nits public licenses to material it publishes and in those instances\nwill be considered the “Licensor.” The text of the Creative Commons\npublic licenses is dedicated to the public domain under the CC0 Public\nDomain Dedication. Except for the limited purpose of indicating that\nmaterial is shared under a Creative Commons public license or as\notherwise permitted by the Creative Commons policies published at\ncreativecommons.org/policies, Creative Commons does not authorize the\nuse of the trademark \"Creative Commons\" or any other trademark or logo\nof Creative Commons without its prior written consent including,\nwithout limitation, in connection with any unauthorized modifications\nto any of its public licenses or any other arrangements,\nunderstandings, or agreements concerning use of licensed material. For\nthe avoidance of doubt, this paragraph does not form part of the\npublic licenses.\n\nCreative Commons may be contacted at creativecommons.org.\n"
  },
  {
    "path": "README.md",
    "content": "## DiffusionTrack：Diffusion Model For Multi-Object Tracking\n\n**DiffusionTrack is the first work of diffusion model for multi-object tracking.**\n\n![image-20230819130751450](README.assets/image-20230819130751450.png)\n\n[**DiffusionTrack：Diffusion Model For Multi-Object Tracking**](https://arxiv.org/abs/2308.09905)\n\nRun Luo, Zikai Song, Lintao Ma, Jinlin Wei\n\n*[arXiv 2308.09905](https://arxiv.org/abs/2308.09905)*\n\n## Tracking performance\n\n### Results on MOT17 challenge test set with 15.89 FPS\n\n| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |\n| ------------------ | -------- | -------- | -------- | -------- | -------- |\n| TrackFormer        | 74.1     | 68.0     | 57.3     | 54.1     | 60.9     |\n| MeMOT              | 72.5     | 69.0     | 56.9     | 55.2     | /        |\n| MOTR               | 71.9     | 68.4     | 57.2     | 55.8     | /        |\n| CenterTrack        | 67.8     | 64.7     | 52.2     | 51.0     | 53.8     |\n| PermaTrack         | 73.8     | 68.9     | 55.5     | 53.1     | 58.5     |\n| TransCenter        | 73.2     | 62.2     | 54.5     | 49.7     | 60.1     |\n| GTR                | 75.3     | 71.5     | 59.1     | 57.0     | 61.6     |\n| TubeTK             | 63.0     | 58.6     | /        | /        | /        |\n| **DiffusionTrack** | **77.9** | **73.8** | **60.8** | **58.8** | **63.2** |\n\n### Results on MOT20 challenge test set with 13.37 FPS\n\n| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |\n| ------------------ | -------- | -------- | -------- | -------- | -------- |\n| TrackFormer        | 68.6     | 65.7     | 54.7     | 53.0     | 56.7     |\n| MeMOT              | 63.7     | 66.1     | 54.1     | **55.0** | /        |\n| TransCenter        | 67.7     | 58.7     | /        | /        | /        |\n| **DiffusionTrack** | **72.8** | **66.3** | **55.3** | 51.3     | **59.9** |\n\n### Results on Dancetrack challenge test set with 21.05 FPS\n\n| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |\n| ------------------ | -------- | -------- | -------- | -------- | -------- |\n| TransTrack         | 88.4     | 45.2     | 45.5     | 27.5     | 75.9     |\n| CenterTrack        | 86.8     | 35.7     | 41.8     | 22.6     | 78.1     |\n| **DiffusionTrack** | **89.3** | **47.5** | **52.4** | **33.5** | **82.2** |\n\n### Visualization results\n\n![MOT20](README.assets/MOT20.gif)\n\n![dancetrack](README.assets/dancetrack.gif)\n\n### Robustness to detection perturbation\n\n![image-20230819134931428](README.assets/image-20230819134931428.png)\n\n## Installation\n\nStep1. Install requirements for DiffusionTrack.\n\n```\ngit clone https://github.com/RainBowLuoCS/DiffusionTrack.git\ncd DiffusionTrack\npip3 install -r requirements.txt\npython3 setup.py develop\n```\n\nStep2. Install [pycocotools](https://github.com/cocodataset/cocoapi).\n\n```\npip3 install cython; pip3 install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n```\n\nStep3. Others\n\n```\npip3 install cython_bbox\n```\n\nStep4. 
\n## Data preparation\n\nDownload [MOT17](https://motchallenge.net/), [MOT20](https://motchallenge.net/), [CrowdHuman](https://www.crowdhuman.org/), [Cityperson](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md), [ETHZ](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md), and [DanceTrack](https://github.com/DanceTrack/DanceTrack), then put them under <DiffusionTrack_HOME>/datasets in the following structure:\n\n```\ndatasets\n   |——————mot\n   |        └——————train\n   |        └——————test\n   └——————crowdhuman\n   |         └——————Crowdhuman_train\n   |         └——————Crowdhuman_val\n   |         └——————annotation_train.odgt\n   |         └——————annotation_val.odgt\n   └——————MOT20\n   |        └——————train\n   |        └——————test\n   └——————dancetrack\n   |        └——————train\n   |        └——————test\n   └——————Cityscapes\n   |        └——————images\n   |        └——————labels_with_ids\n   └——————ETHZ\n            └——————eth01\n            └——————...\n            └——————eth07\n```\n\nThen, convert the datasets to COCO format and mix the different training data:\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/convert_mot17_to_coco.py\npython3 tools/convert_dancetrack_to_coco.py\npython3 tools/convert_mot20_to_coco.py\npython3 tools/convert_crowdhuman_to_coco.py\npython3 tools/convert_cityperson_to_coco.py\npython3 tools/convert_ethz_to_coco.py\n```\n\nBefore mixing the different datasets, follow the operations in [mix_xxx.py](https://github.com/ifzhang/ByteTrack/blob/c116dfc746f9ebe07d419caa8acba9b3acfa79a6/tools/mix_data_ablation.py#L6) to create the data folders and links. Finally, you can mix the training data:\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/mix_data_ablation.py\npython3 tools/mix_data_test_mot17.py\npython3 tools/mix_data_test_mot20.py\n```\n
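\nAfter conversion, you can sanity-check one of the generated annotation files with pycocotools (an illustrative sketch; the annotation path below is an assumption, so adjust it to whatever the convert scripts actually write on your machine):\n\n```\nfrom pycocotools.coco import COCO\n\n# Hypothetical output path of tools/convert_mot17_to_coco.py -- adjust as needed.\ncoco = COCO(\"datasets/mot/annotations/train_half.json\")\nprint(\"images:\", len(coco.imgs), \"annotations:\", len(coco.anns))\n```\n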
\n## Model zoo\n\nYou can download our model weights from [our model zoo](https://drive.google.com/drive/folders/1xfBo04Ncm504xFUMtC4_0g0Bf61yPsXh?usp=sharing). We provide 32-bit precision models; you can load one and fine-tune it in half precision to obtain 16-bit weights, which reach the inference speeds reported above.\n
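\nAs a starting point before half-precision fine-tuning, casting the released 32-bit weights to fp16 could look like the sketch below (illustrative only; the checkpoint file name and the \"model\" key are assumptions based on the ByteTrack-style checkpoints used in this repo):\n\n```\nimport torch\n\n# Hypothetical checkpoint name -- substitute one downloaded from the model zoo.\nckpt = torch.load(\"pretrained/diffusiontrack_mot17_track.pth.tar\", map_location=\"cpu\")\nstate = ckpt[\"model\"]  # assumed key for the weights\nckpt[\"model\"] = {k: v.half() if v.is_floating_point() else v for k, v in state.items()}\ntorch.save(ckpt, \"pretrained/diffusiontrack_mot17_track_fp16.pth.tar\")\n```\n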
\n## Training\n\nThe pretrained YOLOX models can be downloaded from the ByteTrack [model zoo](https://github.com/ifzhang/ByteTrack). After downloading the pretrained models, put them under <DiffusionTrack_HOME>/pretrained.\n\n- **Train ablation model (MOT17 half train and CrowdHuman)**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py -d 8 -b 16 -o -c pretrained/bytetrack_ablation.pth.tar\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -d 8 -b 16 -o -c pretrained/diffusiontrack_ablation_det.pth.tar\n```\n\n- **Train MOT17 test model (MOT17 train, CrowdHuman, Cityperson and ETHZ)**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot17_det.pth.tar\n```\n\n- **Train MOT20 test model (MOT20 train, CrowdHuman)**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot20.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot20.pth.tar\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot20_det.pth.tar\n```\n\n- **Train DanceTrack test model (DanceTrack)**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_dancetrack.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar\npython3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -d 8 -b 16 -o -c pretrained/diffusiontrack_dancetrack_det.pth.tar\n```\n\n## Tracking\n\n- **Evaluation on MOT17 half val**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -c pretrained/diffusiontrack_ablation_track.pth.tar -b 1 -d 1 --fuse\n```\n\n- **Test on MOT17**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -c pretrained/diffusiontrack_mot17_track.pth.tar -b 1 -d 1 --fuse\n```\n\n- **Test on MOT20**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -c pretrained/diffusiontrack_mot20_track.pth.tar -b 1 -d 1 --fuse\n```\n\n- **Test on DanceTrack**\n\n```\ncd <DiffusionTrack_HOME>\npython3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -c pretrained/diffusiontrack_dancetrack_track.pth.tar -b 1 -d 1 --fuse\n```\n\n## News\n\n- (2024.02) [DiffMOT](https://github.com/Kroery/DiffMOT.git) was accepted to CVPR 2024, demonstrating the potential of diffusion-based trackers and once again validating our early insight. Congratulations!\n- (2023.12) Our paper was accepted to AAAI 2024!\n- (2023.08) Code is released!\n- (2023.06) Despite being rejected by NeurIPS 2023, we firmly believe the diffusion model is a novel solution to multi-object tracking.\n- (2022.11) Wrote the first line of code for this great idea!\n\n## License\n\nThis project is released under the CC BY-NC 4.0 license. See [LICENSE](LICENSE) for details.\n\n## Citation\n\nIf you use DiffusionTrack in your research or wish to refer to the baseline results published here, please use the following BibTeX entry.\n\n```\n@article{luo2023diffusiontrack,\n  title={DiffusionTrack: Diffusion Model For Multi-Object Tracking},\n  author={Luo, Run and Song, Zikai and Ma, Lintao and Wei, Jinlin and Yang, Wei and Yang, Min},\n  journal={arXiv preprint arXiv:2308.09905},\n  year={2023}\n}\n```\n\n## Acknowledgement\n\nA large part of the code is borrowed from [ByteTrack](https://github.com/ifzhang/ByteTrack) and [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet). Thanks for their wonderful works.\n\n"
  },
  {
    "path": "diffusion/models/diffusion_head.py",
    "content": "import math\nimport random\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torchvision.ops import nms,box_iou\n\nfrom .diffusion_losses import SetCriterionDynamicK, HungarianMatcherDynamicK\nfrom .diffusion_models import DynamicHead\n\nfrom yolox.utils.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh\nfrom yolox.utils import synchronize\nfrom detectron2.layers import batched_nms\nimport time\n\nModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])\n\n\ndef exists(x):\n    return x is not None\n\n\ndef default(val, d):\n    if exists(val):\n        return val\n    return d() if callable(d) else d\n\n\ndef extract(a, t, x_shape):\n    \"\"\"extract the appropriate  t  index for a batch of indices\"\"\"\n    batch_size = t.shape[0]\n    out = a.gather(-1, t)\n    return out.reshape(batch_size, *((1,) * (len(x_shape) - 1)))\n\n\ndef cosine_beta_schedule(timesteps, s=0.008):\n    \"\"\"\n    cosine schedule\n    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ\n    \"\"\"\n    steps = timesteps + 1\n    x = torch.linspace(0, timesteps, steps, dtype=torch.float64)\n    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2\n    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]\n    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])\n    return torch.clip(betas, 0, 0.999)\n\nclass DiffusionHead(nn.Module):\n    \"\"\"\n    Implement DiffusionHead\n    \"\"\"\n\n    def __init__(self,\n                num_classes,\n                width=1.0,\n                strides=[8, 16, 32],\n                num_proposals=500,\n                num_heads=6,):\n        super().__init__()\n        self.device=\"cpu\"\n        self.dtype=torch.float32\n        self.width=width\n        self.num_classes = num_classes\n        self.num_proposals = num_proposals\n        # self.num_proposals = 512\n        self.hidden_dim = int(256*width)\n        self.num_heads = num_heads\n\n        # build diffusion\n        timesteps = 1000\n        sampling_timesteps = 1\n        self.objective = 'pred_x0'\n        betas = cosine_beta_schedule(timesteps)\n        alphas = 1. - betas\n        alphas_cumprod = torch.cumprod(alphas, dim=0)\n        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)\n        timesteps, = betas.shape\n        self.num_timesteps = int(timesteps)\n\n        # tracking setting\n        self.inference_time_range=1\n        self.track_candidate=1\n        self.candidate_num_strategy=max\n\n        self.sampling_timesteps = default(sampling_timesteps, timesteps)\n        assert self.sampling_timesteps <= timesteps\n        self.is_ddim_sampling = self.sampling_timesteps < timesteps\n        self.ddim_sampling_eta = 1.\n        self.self_condition = False\n        self.scale = 2.0\n        self.box_renewal = True\n        self.use_ensemble = True\n\n        self.register_buffer('betas', betas)\n        self.register_buffer('alphas_cumprod', alphas_cumprod)\n        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)\n\n        # calculations for diffusion q(x_t | x_{t-1}) and others\n\n        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))\n        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))\n        self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))\n        self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. 
/ alphas_cumprod))\n        self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))\n\n        # calculations for posterior q(x_{t-1} | x_t, x_0)\n\n        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)\n\n        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)\n\n        self.register_buffer('posterior_variance', posterior_variance)\n\n        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain\n\n        self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))\n        self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))\n        self.register_buffer('posterior_mean_coef2',\n                             (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))\n\n        # Build Dynamic Head.\n        class_weight = 2.0\n        giou_weight = 2.0\n        l1_weight = 5.0\n        no_object_weight =0.1\n        self.deep_supervision = True\n        self.use_focal = True\n        self.use_fed_loss = False\n        self.use_nms = False\n        self.pooler_resolution=7\n        self.noise_strategy=\"xywh\"\n   \n        self.head = DynamicHead(num_classes,self.hidden_dim,self.pooler_resolution,strides,[self.hidden_dim]*len(strides),return_intermediate=self.deep_supervision,num_heads=self.num_heads,use_focal=self.use_focal,use_fed_loss=self.use_fed_loss)\n        # Loss parameters:\n\n        # Build Criterion.\n        matcher = HungarianMatcherDynamicK(\n            cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss\n        )\n        weight_dict = {\"loss_ce\": class_weight, \"loss_bbox\": l1_weight, \"loss_giou\": giou_weight}\n        if self.deep_supervision:\n            aux_weight_dict = {}\n            for i in range(self.num_heads - 1):\n                aux_weight_dict.update({k + f\"_{i}\": v for k, v in weight_dict.items()})\n            weight_dict.update(aux_weight_dict)\n\n        losses = [\"labels\", \"boxes\"]\n\n        self.criterion = SetCriterionDynamicK(\n            num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,\n            losses=losses, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss)\n\n    def predict_noise_from_start(self, x_t, t, x0):\n        return (\n                (extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /\n                extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n        )\n\n    def model_predictions(self, backbone_feats,images_whwh,x,t,lost_features=None,fix_bboxes=False,x_self_cond=None,clip_x_start=False):\n\n        def prepare(x,images_whwh):\n            x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)\n            x_boxes = ((x_boxes / self.scale) + 1) / 2\n            x_boxes = box_cxcywh_to_xyxy(x_boxes)\n            x_boxes = x_boxes * images_whwh[:, None, :]\n            return x_boxes\n        \n        def post(x_start,images_whwh):\n            x_start = x_start / images_whwh[:, None, :]\n            x_start = box_xyxy_to_cxcywh(x_start)\n            x_start = (x_start * 2 - 1.) 
* self.scale\n            x_start = torch.clamp(x_start, min=-1 * self.scale, max=self.scale)\n            return x_start\n        \n        bs=len(x)//2\n        bboxes=prepare(x,images_whwh=images_whwh)\n        start_time=time.time()\n        outputs_class, outputs_coord,outputs_score = self.head(backbone_feats,torch.split(bboxes,bs,dim=0),t,lost_features,fix_bboxes)\n        end_time=time.time()\n\n        x_start = outputs_coord[-1]  # (batch, num_proposals, 4) predict boxes: absolute coordinates (x1, y1, x2, y2)\n        x_start=post(x_start,images_whwh=images_whwh)\n        pred_noise = self.predict_noise_from_start(x,t,x_start)\n        return ModelPrediction(pred_noise, x_start), outputs_class,outputs_coord,outputs_score,end_time-start_time\n    \n    @torch.no_grad()\n    def new_ddim_sample(self,backbone_feats,images_whwh,ref_targets=None,dynamic_time=True,num_timesteps=1,num_proposals=500,inference_time_range=1,track_candidate=1,diffusion_t=200,clip_denoised=True):\n        batch = images_whwh.shape[0]//2\n        self.sampling_timesteps,self.num_proposals,self.track_candidate,self.inference_time_range=num_timesteps,num_proposals,track_candidate,inference_time_range\n        shape = (batch, self.num_proposals, 4)\n        cur_bboxes= torch.randn(shape,device=self.device,dtype=self.dtype)\n        ref_t_list=[]\n        track_t_list=[]\n        total_time=0\n        if ref_targets is None or self.track_candidate==0:\n            ref_bboxes=torch.randn(shape, device=self.device)\n            for i in range(batch):\n                t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long()\n                if dynamic_time:\n                    ref_t,track_t=t[0],t[1]\n                else:\n                    ref_t,track_t=t[0],t[0]\n                ref_t_list.append(ref_t)\n                track_t_list.append(track_t)\n        else:\n            labels =ref_targets[..., :5]\n            nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects\n            shape = (batch, self.num_proposals, 4)\n            diffused_boxes = []\n            cur_diffused_boxes=[]\n            for batch_idx,num_gt in enumerate(nlabel):\n                gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt])\n                image_size_xyxy = images_whwh[batch_idx]\n                gt_boxes = gt_bboxes_per_image  / image_size_xyxy\n                # cxcywh\n                gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n                # t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long()\n                # if dynamic_time:\n                #     ref_t,track_t=t[0],t[1]\n                # else:\n                #     ref_t,track_t=t[0],t[0]\n                if batch_idx==0:\n                    ref_t=diffusion_t\n                    track_t=diffusion_t\n                else:\n                    ref_t=diffusion_t\n                    track_t=diffusion_t\n                    self.track_candidate=4\n                d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,ref_t)\n                diffused_boxes.append(d_boxes)\n                ref_t_list.append(ref_t)\n                d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,track_t,ref_label)\n                cur_diffused_boxes.append(d_boxes)\n                track_t_list.append(track_t)\n            ref_bboxes=torch.stack(diffused_boxes)\n            
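# current-frame boxes diffused from the same ground truth as ref_bboxes (via prepare_diffusion_concat)\n            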
cur_bboxes=torch.stack(cur_diffused_boxes)\n\n\n        sampling_timesteps, eta= self.sampling_timesteps, self.ddim_sampling_eta\n\n        def get_time_pairs(t,sampling_timesteps):\n            # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps\n            times = torch.linspace(-1, t - 1, steps=sampling_timesteps + 1)\n            times = list(reversed(times.int().tolist()))\n            time_pairs = list(zip(times[:-1], times[1:]))  # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]\n            return time_pairs\n        \n        ref_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in ref_t_list],device=self.device,dtype=torch.long)\n        track_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in track_t_list],device=self.device,dtype=torch.long)\n        # (batch,sampling_timesteps,2)\n        bboxes=torch.cat([ref_bboxes,cur_bboxes],dim=0)\n\n        x_start = None\n        # for (ref_time, ref_time_next),(cur_time, cur_time_next) in zip(ref_time_pairs,cur_time_pairs):\n        for sampling_timestep in range(sampling_timesteps):\n            is_last=sampling_timestep==(sampling_timesteps-1)\n\n            ref_time_cond = ref_t_time_pairs_list[:,sampling_timestep,0]\n            cur_time_cond = track_t_time_pairs_list[:,sampling_timestep,0]\n\n            time_cond=torch.cat([ref_time_cond,cur_time_cond],dim=0)\n\n            self_cond = x_start if self.self_condition else None\n\n            preds, outputs_class, outputs_coord,outputs_score,association_time = self.model_predictions(backbone_feats,images_whwh,bboxes,time_cond,fix_bboxes=False,\n                                                                         x_self_cond=self_cond, clip_x_start=clip_denoised)\n            total_time+=association_time\n            pred_noise, x_start = preds.pred_noise, preds.pred_x_start\n                \n\n            if is_last:\n                bboxes = x_start\n                continue\n\n            if self.box_renewal:  # filter\n                remain_list=[]\n                pre_remain_bboxes=[]\n                pre_remain_x_start=[]\n                pre_remain_pred_noise=[]\n                cur_remain_bboxes=[]\n                cur_remain_x_start=[]\n                cur_remain_pred_noise=[]\n                for i in range(batch):\n                    # if i==0:\n                    #     remain_list.append(len(pred_noise[i,:,:]))\n                    #     pre_remain_pred_noise.append(pred_noise[i,:,:])\n                    #     cur_remain_pred_noise.append(pred_noise[i+batch,:,:])\n                    #     pre_remain_x_start.append(x_start[i,:,:])\n                    #     cur_remain_x_start.append(x_start[i+batch,:,:])\n                    #     pre_remain_bboxes.append(bboxes[i,:,:])\n                    #     cur_remain_bboxes.append(bboxes[i+batch,:,:])\n                    # else:\n                    threshold = 0.2\n                    score_per_image = outputs_score[-1][i]\n                    # pre_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i]))\n                    # cur_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i+batch]))\n                    # value=((pre_score+cur_score)/2).flatten()\n                    value, _ = torch.max(score_per_image, -1, keepdim=False)\n                    keep_idx = value >=threshold\n                    num_remain = torch.sum(keep_idx)\n                    remain_list.append(num_remain)\n                    
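# keep only proposals whose score clears the threshold; the freed slots are refilled with fresh noise after the DDIM step\n                    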
pre_remain_pred_noise.append(pred_noise[i,keep_idx,:])\n                    cur_remain_pred_noise.append(pred_noise[i+batch,keep_idx,:])\n                    pre_remain_x_start.append(x_start[i,keep_idx,:])\n                    cur_remain_x_start.append(x_start[i+batch,keep_idx,:])\n                    pre_remain_bboxes.append(bboxes[i,keep_idx,:])\n                    cur_remain_bboxes.append(bboxes[i+batch,keep_idx,:])\n                x_start=pre_remain_x_start+cur_remain_x_start\n                bboxes=pre_remain_bboxes+cur_remain_bboxes\n                pred_noise=pre_remain_pred_noise+cur_remain_pred_noise\n\n            def diffusion(sampling_times,bboxes,x_start,pred_noise):\n                \n                times,time_nexts=sampling_times[:,0],sampling_times[:,1]\n\n                alpha = torch.tensor([self.alphas_cumprod[time] for time in times],dtype=self.dtype,device=self.device)\n                alpha_next = torch.tensor([self.alphas_cumprod[time_next] for time_next in time_nexts],dtype=self.dtype,device=self.device)\n\n                sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()\n                c = (1 - alpha_next - sigma ** 2).sqrt()\n\n                if self.box_renewal:\n                    for i in range(batch):\n                        noise = torch.randn_like(bboxes[i])\n                        bboxes[i] = x_start[i] * alpha_next[i].sqrt() + \\\n                            c[i] * pred_noise[i] + \\\n                            sigma[i] * noise\n                        \n                        bboxes[i] = torch.cat((bboxes[i], torch.randn(self.num_proposals - remain_list[i], 4, device=self.device)), dim=0)\n                else:\n                    noise = torch.randn_like(bboxes)\n\n                    bboxes = x_start * alpha_next.sqrt()[:,None,None] + \\\n                        c[:,None,None] * pred_noise + \\\n                        sigma[:,None,None] * noise\n                \n                return bboxes\n            \n            bboxes[:batch]=diffusion(ref_t_time_pairs_list[:,sampling_timestep],bboxes[:batch],x_start[:batch],pred_noise[:batch])\n            bboxes[batch:]=diffusion(track_t_time_pairs_list[:,sampling_timestep],bboxes[batch:],x_start[batch:],pred_noise[batch:])\n\n            if self.box_renewal:\n                bboxes=torch.stack(bboxes)\n\n        box_cls = outputs_class[-1]\n        box_pred = outputs_coord[-1]\n        conf_score=outputs_score[-1]\n\n        return torch.cat([box_pred.view(2*batch,-1,4),box_cls.view(2*batch,-1,1)],dim=-1),conf_score.view(batch,-1,1),total_time\n    \n    # forward diffusion\n    def q_sample(self, x_start, t, noise=None):\n        if noise is None:\n            noise = torch.randn_like(x_start)\n\n        sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, t, x_start.shape)\n        sqrt_one_minus_alphas_cumprod_t = extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n\n        return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise\n\n    def forward(self,features,mate_info,targets=None):\n\n        mate_shape,mate_device,mate_dtype=mate_info\n        self.device=mate_device\n        self.dtype=mate_dtype\n        b,_,h,w=mate_shape\n        \n        images_whwh=torch.tensor([w, h, w, h], dtype=self.dtype, device=self.device)[None,:].expand(2*b,4)\n        if not self.training:\n            results = self.new_ddim_sample(features,images_whwh,targets,dynamic_time=False)\n            return results\n\n        if 
self.training:\n            targets, x_boxes, noises, t = self.prepare_targets(targets,images_whwh)\n            t=t.squeeze(-1)\n            # t[b:]=t[:b]\n            x_boxes = x_boxes * images_whwh[:,None,:]\n            pre_x_boxes,cur_x_boxes=torch.split(x_boxes,b,dim=0)\n\n            outputs_class,outputs_coord,outputs_score = self.head(features,(pre_x_boxes,cur_x_boxes),t)\n            output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1],'pred_scores':outputs_score[-1]}\n\n            if self.deep_supervision:\n                output['aux_outputs'] = [{'pred_logits': a, 'pred_boxes': b,'pred_scores': c}\n                                         for a, b, c in zip(outputs_class[:-1], outputs_coord[:-1],outputs_score[:-1])]\n            loss_dict = self.criterion(output, targets)\n            weight_dict = self.criterion.weight_dict\n            for k in loss_dict.keys():\n                if k in weight_dict: \n                    loss_dict[k] *= weight_dict[k]\n            return loss_dict\n \n    def prepare_diffusion_repeat(self,gt_boxes,t,ref_repeat_tensor=None):\n        \"\"\"\n        :param gt_boxes: (cx, cy, w, h), normalized\n        :param num_proposals:\n        \"\"\"\n        t = torch.full((1,),t,device=self.device).long()\n\n        noise = torch.randn(self.num_proposals,4,device=self.device,dtype=self.dtype)\n\n        num_gt = gt_boxes.shape[0]\n        if not num_gt:  # generate fake gt boxes if empty gt boxes\n            gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device)\n            num_gt = 1\n\n        num_repeat = self.num_proposals // num_gt  # number of repeat except the last gt box in one image\n        repeat_tensor = [num_repeat] * (num_gt - self.num_proposals % num_gt) + [num_repeat + 1] * (\n                self.num_proposals % num_gt)\n        assert sum(repeat_tensor) == self.num_proposals\n        random.shuffle(repeat_tensor)\n        repeat_tensor = torch.tensor(repeat_tensor, device=self.device)\n        if ref_repeat_tensor is not None:\n            repeat_tensor=ref_repeat_tensor\n\n        gt_boxes = (gt_boxes * 2. - 1.) 
* self.scale\n        x_start = torch.repeat_interleave(gt_boxes, repeat_tensor, dim=0)\n\n        if self.noise_strategy==\"xy\":\n            noise[:,2:]=0\n        # noise sample\n        x = self.q_sample(x_start=x_start, t=t, noise=noise)\n\n        if self.training:\n            x = torch.clamp(x, min=-1 * self.scale, max=self.scale)\n            x = ((x / self.scale) + 1) / 2.\n\n            diff_boxes = box_cxcywh_to_xyxy(x)\n        else:\n            diff_boxes=x\n\n        return diff_boxes,noise,repeat_tensor\n\n    def prepare_diffusion_concat(self,gt_boxes,t,ref_mask=None):\n        \"\"\"\n        :param gt_boxes: (cx, cy, w, h), normalized\n        :param num_proposals:\n        \"\"\"\n        if self.training:\n            self.track_candidate=1\n        t = torch.full((1,),t,device=self.device).long()\n        noise = torch.randn(self.num_proposals, 4, device=self.device,dtype=self.dtype)\n        select_mask=None\n        num_gt = gt_boxes.shape[0]*self.track_candidate\n        if not num_gt:  # generate fake gt boxes if empty gt boxes\n            gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device)\n            num_gt = 1\n        else:\n            gt_boxes=torch.repeat_interleave(gt_boxes,torch.tensor([self.track_candidate]*gt_boxes.shape[0],device=self.device),dim=0)\n        if num_gt < self.num_proposals:\n            box_placeholder = torch.randn(self.num_proposals - num_gt, 4,\n                                          device=self.device,dtype=self.dtype) / 6. + 0.5  # 3sigma = 1/2 --> sigma: 1/6\n            # box_placeholder=torch.clip(torch.poisson(torch.clip(box_placeholder*5,min=0)),min=1,max=10)/10\n            # box_placeholder=torch.nn.init.uniform_(box_placeholder, a=0, b=1)\n            # box_placeholder=torch.ones_like(box_placeholder)\n            # box_placeholder[:,:2]=box_placeholder[:,:2]/2\n            box_placeholder[:, 2:4] = torch.clip(box_placeholder[:, 2:4], min=1e-4)\n            x_start = torch.cat((gt_boxes, box_placeholder), dim=0)\n        elif num_gt > self.num_proposals:\n            select_mask = [True] * self.num_proposals + [False] * (num_gt - self.num_proposals)\n            random.shuffle(select_mask)\n            if ref_mask is not None:\n                select_mask=ref_mask\n            x_start = gt_boxes[select_mask]\n        else:\n            x_start = gt_boxes\n\n        x_start = (x_start * 2. - 1.) 
* self.scale\n\n        if self.noise_strategy==\"xy\":\n            noise[:,2:]=0\n        # noise sample\n        x = self.q_sample(x_start=x_start, t=t, noise=noise)\n\n        if self.training:\n            # x=x_start\n\n            x = torch.clamp(x, min=-1 * self.scale, max=self.scale)\n            x = ((x / self.scale) + 1) / 2.\n\n            diff_boxes = box_cxcywh_to_xyxy(x)\n        else:\n            diff_boxes = x\n\n        return diff_boxes, noise, select_mask\n\n    def prepare_targets(self,targets,images_whwh):\n        labels = targets[..., :5]\n        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects\n        new_targets = []\n        diffused_boxes = []\n        noises = []\n        ts = []\n        select_mask={}\n        # select_t={}\n        # select_gt_boxes={}\n        for batch_idx,num_gt in enumerate(nlabel):\n            target = {}\n            gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt, 1:5])\n            gt_classes = labels[batch_idx, :num_gt, 0]\n            image_size_xyxy = images_whwh[batch_idx]\n            gt_boxes = gt_bboxes_per_image  / image_size_xyxy\n            # cxcywh\n            gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n            x_gt_boxes=gt_boxes\n            d_t = torch.randint(0, self.num_timesteps, (1,), device=self.device).long()[0]\n            ## baseline setting\n            # if batch_idx<len(nlabel)//2:\n            #     d_t = torch.randint(0, 40, (1,), device=self.device).long()[0]\n            # else:\n            #     d_t = torch.randint(0, self.num_timesteps, (1,), device=self.device).long()[0]\n            # if select_t.get(batch_idx%(len(nlabel)//2),None) is not None:\n            #     d_t=select_t.get(batch_idx%(len(nlabel)//2),None)\n            # if select_gt_boxes.get(batch_idx%(len(nlabel)//2),None) is not None:\n            #     x_gt_boxes=select_gt_boxes.get(batch_idx%(len(nlabel)//2),None)    \n            d_boxes,d_noise,d_mask= self.prepare_diffusion_concat(x_gt_boxes,d_t,select_mask.get(batch_idx%(len(nlabel)//2),None))\n            if d_mask is not None:\n                select_mask[batch_idx%(len(nlabel)//2)]=d_mask\n            # if d_t is not None:\n            #     select_t[batch_idx%(len(nlabel)//2)]=d_t\n            # if select_gt_boxes.get(batch_idx%(len(nlabel)//2),None) is None:\n            #     select_gt_boxes[batch_idx%(len(nlabel)//2)]=gt_boxes \n            diffused_boxes.append(d_boxes)\n            noises.append(d_noise)\n            ts.append(d_t)\n            target[\"labels\"] = gt_classes.long()\n            target[\"boxes\"] = gt_boxes\n            target[\"boxes_xyxy\"] = gt_bboxes_per_image\n            target[\"image_size_xyxy\"] = image_size_xyxy\n            image_size_xyxy_tgt = image_size_xyxy.unsqueeze(0).repeat(len(gt_boxes), 1)\n            target[\"image_size_xyxy_tgt\"] = image_size_xyxy_tgt\n            new_targets.append(target)\n\n        return new_targets, torch.stack(diffused_boxes), torch.stack(noises), torch.stack(ts)\n\n\n\n"
  },
  {
    "path": "diffusion/models/diffusion_losses.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom fvcore.nn import sigmoid_focal_loss_jit\nimport torchvision.ops as ops\nfrom yolox.utils import box_ops\nfrom yolox.utils.dist import get_world_size, is_dist_avail_and_initialized\nfrom yolox.utils.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh, generalized_box_iou\n\n\nclass SetCriterionDynamicK(nn.Module):\n    \"\"\" This class computes the loss for DiffusionDet.\n    The process happens in two steps:\n        1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n    \"\"\"\n    def __init__(self,num_classes, matcher, weight_dict, eos_coef, losses, use_focal,use_fed_loss):\n        \"\"\" Create the criterion.\n        Parameters:\n            num_classes: number of object categories, omitting the special no-object category\n            matcher: module able to compute a matching between targets and proposals\n            weight_dict: dict containing as key the names of the losses and as values their relative weight.\n            eos_coef: relative classification weight applied to the no-object category\n            losses: list of all the losses to be applied. See get_loss for list of available losses.\n        \"\"\"\n        super().__init__()\n        self.num_classes = num_classes\n        self.matcher = matcher\n        self.weight_dict = weight_dict\n        self.eos_coef = eos_coef\n        self.losses = losses\n        self.use_focal = use_focal\n        self.use_fed_loss = use_fed_loss\n        if self.use_fed_loss:\n            self.fed_loss_num_classes = 50\n            from detectron2.data.detection_utils import get_fed_loss_cls_weights\n            cls_weight_fun = lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER)  # noqa\n            fed_loss_cls_weights = cls_weight_fun()\n            assert (\n                    len(fed_loss_cls_weights) == self.num_classes\n            ), \"Please check the provided fed_loss_cls_weights. 
Their size should match num_classes\"\n            self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\n\n        if self.use_focal:\n            self.focal_loss_alpha = 0.25\n            self.focal_loss_gamma = 2.0\n        else:\n            empty_weight = torch.ones(self.num_classes + 1)\n            empty_weight[-1] = self.eos_coef\n            self.register_buffer('empty_weight', empty_weight)\n\n    # copy-paste from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/fast_rcnn.py#L356\n    def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\n        \"\"\"\n        Args:\n            gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n            num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\n            Will sample negative classes if number of unique gt_classes is smaller than this value.\n            num_classes: number of foreground classes\n            weight: probabilities used to sample negative classes\n        Returns:\n            Tensor:\n                classes to keep when calculating the federated loss, including both unique gt\n                classes and sampled negative classes.\n        \"\"\"\n        unique_gt_classes = torch.unique(gt_classes)\n        prob = unique_gt_classes.new_ones(num_classes + 1).float()\n        prob[-1] = 0\n        if len(unique_gt_classes) < num_fed_loss_classes:\n            prob[:num_classes] = weight.float().clone()\n            prob[unique_gt_classes] = 0\n            sampled_negative_classes = torch.multinomial(\n                prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\n            )\n            fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])\n        else:\n            fed_loss_classes = unique_gt_classes\n        return fed_loss_classes\n\n    def loss_labels(self, outputs, targets, indices, num_boxes, log=False):\n        \"\"\"Classification loss (NLL)\n        targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n        \"\"\"\n        assert 'pred_logits' in outputs\n        src_logits = outputs['pred_logits']\n        conf_score=torch.cat([outputs['pred_scores'],outputs['pred_scores']],dim=0)\n        p=torch.sqrt(torch.sigmoid(src_logits)*conf_score)\n        src_logits=torch.log(p/(1-p))\n        batch_size = len(targets)\n\n        # src_logits_re=torch.cat((src_logits[:batch_size//2],src_logits[batch_size//2:]),dim=0)\n        # src_logits=(src_logits+src_logits_re)/2\n\n        # idx = self._get_src_permutation_idx(indices)\n        # target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n        target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n                                    dtype=torch.int64, device=src_logits.device)\n        # src_logits_list = []\n        target_classes_o_list = []\n        # target_classes[idx] = target_classes_o\n        for batch_idx in range(batch_size):\n            valid_query = indices[batch_idx%(batch_size//2)][0]\n            gt_multi_idx = indices[batch_idx%(batch_size//2)][1]\n            if len(gt_multi_idx) == 0:\n                continue\n            # bz_src_logits = src_logits[batch_idx]\n            target_classes_o = targets[batch_idx][\"labels\"]\n            target_classes[batch_idx, valid_query] = target_classes_o[gt_multi_idx]\n\n            # 
src_logits_list.append(bz_src_logits[valid_query])\n            target_classes_o_list.append(target_classes_o[gt_multi_idx])\n\n        if self.use_focal or self.use_fed_loss:\n            num_boxes = torch.cat(target_classes_o_list).shape[0] if len(target_classes_o_list) != 0 else 1\n\n            target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], self.num_classes + 1],\n                                                dtype=src_logits.dtype, layout=src_logits.layout,\n                                                device=src_logits.device)\n            target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n            loss_ce=0\n            gt_classes = torch.argmax(target_classes_onehot, dim=-1)\n            target_classes_onehot = target_classes_onehot[:, :, :-1]\n            target_classes_onehot = target_classes_onehot.flatten(0, 1)\n            src_logits = src_logits.flatten(0, 1)\n            if self.use_focal:\n                cls_loss = sigmoid_focal_loss_jit(src_logits, target_classes_onehot, alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction=\"none\")\n            else:\n                cls_loss = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction=\"none\")\n            if self.use_fed_loss:\n                K = self.num_classes\n                N = src_logits.shape[0]\n                fed_loss_classes = self.get_fed_loss_classes(\n                    gt_classes,\n                    num_fed_loss_classes=self.fed_loss_num_classes,\n                    num_classes=K,\n                    weight=self.fed_loss_cls_weights,\n                )\n                fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\n                fed_loss_classes_mask[fed_loss_classes] = 1\n                fed_loss_classes_mask = fed_loss_classes_mask[:K]\n                weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\n\n                loss_ce += torch.sum(cls_loss * weight) / num_boxes\n            else:\n                loss_ce += torch.sum(cls_loss) / num_boxes\n\n            losses = {'loss_ce': loss_ce}\n        else:\n            raise NotImplementedError\n\n        return losses\n\n    def loss_boxes(self, outputs, targets, indices, num_boxes):\n        \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n           targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n        \"\"\"\n        assert 'pred_boxes' in outputs\n        # idx = self._get_src_permutation_idx(indices)\n        src_boxes = outputs['pred_boxes']\n\n        batch_size = len(targets)\n        pred_box_list = []\n        pred_norm_box_list = []\n        tgt_box_list = []\n        tgt_box_xyxy_list = []\n        for batch_idx in range(batch_size):\n            valid_query = indices[batch_idx%(batch_size//2)][0]\n            gt_multi_idx = indices[batch_idx%(batch_size//2)][1]\n            if len(gt_multi_idx) == 0:\n                continue\n            bz_image_whwh = targets[batch_idx]['image_size_xyxy']\n            bz_src_boxes = src_boxes[batch_idx]\n            bz_target_boxes = targets[batch_idx][\"boxes\"]  # normalized (cx, cy, w, h)\n            bz_target_boxes_xyxy = targets[batch_idx][\"boxes_xyxy\"]  # absolute (x1, y1, x2, y2)\n            pred_box_list.append(bz_src_boxes[valid_query])\n            
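# keep both absolute and image-normalized copies: the GIoU term below uses absolute xyxy boxes, the L1 term the normalized ones\n            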
pred_norm_box_list.append(bz_src_boxes[valid_query] / bz_image_whwh)  # normalize (x1, y1, x2, y2)\n            tgt_box_list.append(bz_target_boxes[gt_multi_idx])\n            tgt_box_xyxy_list.append(bz_target_boxes_xyxy[gt_multi_idx])\n\n        if len(pred_box_list) != 0:\n            src_boxes = torch.cat(pred_box_list)\n            src_boxes_re=torch.cat(pred_box_list[-len(pred_box_list)//2:]+pred_box_list[:len(pred_box_list)//2])\n            src_boxes_norm = torch.cat(pred_norm_box_list)  # normalized (x1, y1, x2, y2)\n            target_boxes = torch.cat(tgt_box_list)\n            target_boxes_abs_xyxy = torch.cat(tgt_box_xyxy_list)\n            target_boxes_abs_xyxy_re=torch.cat(tgt_box_xyxy_list[-len(tgt_box_xyxy_list)//2:]+tgt_box_xyxy_list[:len(tgt_box_xyxy_list)//2])\n            num_boxes = src_boxes.shape[0]\n            losses = {}\n            # require normalized (x1, y1, x2, y2)\n            loss_bbox = F.l1_loss(src_boxes_norm, box_cxcywh_to_xyxy(target_boxes), reduction='none')\n            losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n            # loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))\n            loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(src_boxes,src_boxes_re,target_boxes_abs_xyxy,target_boxes_abs_xyxy_re))\n            losses['loss_giou'] = loss_giou.sum() / num_boxes\n        else:\n            losses = {'loss_bbox': outputs['pred_boxes'].sum() * 0,\n                      'loss_giou': outputs['pred_boxes'].sum() * 0}\n\n        return losses\n\n    def _get_src_permutation_idx(self, indices):\n        # permute predictions following indices\n        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n        src_idx = torch.cat([src for (src, _) in indices])\n        return batch_idx, src_idx\n\n    def _get_tgt_permutation_idx(self, indices):\n        # permute targets following indices\n        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n        tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n        return batch_idx, tgt_idx\n\n    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n        loss_map = {\n            'labels': self.loss_labels,\n            'boxes': self.loss_boxes,\n        }\n        assert loss in loss_map, f'do you really want to compute {loss} loss?'\n        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n    def forward(self, outputs, targets):\n        \"\"\" This performs the loss computation.\n        Parameters:\n             outputs: dict of tensors, see the output specification of the model for the format\n             targets: list of dicts, such that len(targets) == batch_size.\n                      The expected keys in each dict depends on the losses applied, see each loss' doc\n        \"\"\"\n        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n\n        # Retrieve the matching between the outputs of the last layer and the targets\n        indices, _ = self.matcher(outputs_without_aux, targets)\n\n        # Compute the average number of target boxes accross all nodes, for normalization purposes\n        num_boxes = sum(len(t[\"labels\"]) for t in targets)//2\n        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n        if is_dist_avail_and_initialized():\n            torch.distributed.all_reduce(num_boxes)\n        
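# average across distributed ranks and clamp at 1 so a batch with no boxes cannot divide the losses by zero\n        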
num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n        # Compute all the requested losses\n        losses = {}\n        for loss in self.losses:\n            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n        if 'aux_outputs' in outputs:\n            for i, aux_outputs in enumerate(outputs['aux_outputs']):\n                indices, _ = self.matcher(aux_outputs, targets)\n                for loss in self.losses:\n                    if loss == 'masks':\n                        # Intermediate masks losses are too costly to compute, we ignore them.\n                        continue\n                    kwargs = {}\n                    if loss == 'labels':\n                        # Logging is enabled only for the last layer\n                        kwargs = {'log': False}\n                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n\n        return losses\n\n\nclass HungarianMatcherDynamicK(nn.Module):\n    \"\"\"This class computes an assignment between the targets and the predictions of the network\n    For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n    there are more predictions than targets. In this case, we do a 1-to-k (dynamic) matching of the best predictions,\n    while the others are un-matched (and thus treated as non-objects).\n    \"\"\"\n    def __init__(self,  cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, cost_mask: float = 1, use_focal: bool = False,use_fed_loss=False):\n        \"\"\"Creates the matcher\n        Params:\n            cost_class: This is the relative weight of the classification error in the matching cost\n            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n        \"\"\"\n        super().__init__()\n        self.cost_class = cost_class\n        self.cost_bbox = cost_bbox\n        self.cost_giou = cost_giou\n        self.use_focal = use_focal\n        self.use_fed_loss = use_fed_loss\n        self.ota_k = 5\n        if self.use_focal:\n            self.focal_loss_alpha = 0.25\n            self.focal_loss_gamma = 2.0\n        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0,  \"all costs cant be 0\"\n\n    def forward(self, outputs, targets):\n        \"\"\" simOTA for detr\"\"\"\n        with torch.no_grad():\n            bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n            conf_score=outputs[\"pred_scores\"]\n            # We flatten to compute the cost matrices in a batch\n            pred_logits_pre,pred_logits_curr=torch.split(outputs[\"pred_logits\"],bs//2,dim=0)\n            out_bbox_pre,out_bbox_curr = torch.split(outputs[\"pred_boxes\"],bs//2,dim=0)\n            if self.use_focal or self.use_fed_loss:\n                out_prob_pre = torch.sqrt(pred_logits_pre.sigmoid()*conf_score)  # [batch_size, num_queries, num_classes]\n                out_prob_curr = torch.sqrt(pred_logits_curr.sigmoid()*conf_score)\n            else:\n                out_prob_pre = torch.sqrt(pred_logits_pre.softmax(-1)*conf_score) # [batch_size, num_queries, num_classes]\n                
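# fuse class probability with the association confidence via a geometric mean, mirroring the focal branch above\n                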
out_prob_curr=torch.sqrt(pred_logits_curr.softmax(-1)*conf_score)\n            indices = []\n            matched_ids = []\n            assert bs == len(targets)\n            for batch_idx in range(bs//2):\n                bz_boxes_pre = out_bbox_pre[batch_idx]  # [num_proposals, 4]\n                bz_out_prob_pre = out_prob_pre[batch_idx]\n                bz_boxes_curr = out_bbox_curr[batch_idx]  # [num_proposals, 4]\n                bz_out_prob_curr = out_prob_curr[batch_idx]\n                bz_tgt_ids_pre = targets[batch_idx][\"labels\"]\n                bz_tgt_ids_curr = targets[batch_idx+bs//2][\"labels\"]\n                num_insts = len(bz_tgt_ids_pre)\n                assert len(bz_tgt_ids_curr)==num_insts,\"YaHoo {},{}!\".format(len(bz_tgt_ids_curr),num_insts)\n                if num_insts == 0:  # empty object in key frame\n                    non_valid = torch.zeros(bz_out_prob_pre.shape[0]).to(bz_out_prob_pre) > 0\n                    indices_batchi = (non_valid, torch.arange(0, 0).to(bz_out_prob_pre))\n                    matched_qidx = torch.arange(0, 0).to(bz_out_prob_pre)\n                    indices.append(indices_batchi)\n                    matched_ids.append(matched_qidx)\n                    continue\n\n                bz_gtboxs_pre = targets[batch_idx]['boxes']  # [num_gt, 4] normalized (cx, xy, w, h)\n                bz_gtboxs_abs_xyxy_pre = targets[batch_idx]['boxes_xyxy']\n                bz_gtboxs_curr = targets[batch_idx+bs//2]['boxes']  # [num_gt, 4] normalized (cx, xy, w, h)\n                bz_gtboxs_abs_xyxy_curr = targets[batch_idx+bs//2]['boxes_xyxy']\n                fg_mask_pre, is_in_boxes_and_center_pre = self.get_in_boxes_info(\n                    box_xyxy_to_cxcywh(bz_boxes_pre),  # absolute (cx, cy, w, h)\n                    box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_pre),  # absolute (cx, cy, w, h)\n                    expanded_strides=32\n                )\n                fg_mask_curr, is_in_boxes_and_center_curr = self.get_in_boxes_info(\n                    box_xyxy_to_cxcywh(bz_boxes_curr),  # absolute (cx, cy, w, h)\n                    box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_curr),  # absolute (cx, cy, w, h)\n                    expanded_strides=32\n                )\n                fg_mask=fg_mask_pre&fg_mask_curr \n                is_in_boxes_and_center=is_in_boxes_and_center_pre&is_in_boxes_and_center_curr\n\n                pair_wise_ious_pre = ops.box_iou(bz_boxes_pre, bz_gtboxs_abs_xyxy_pre)\n                pair_wise_ious_curr = ops.box_iou(bz_boxes_curr, bz_gtboxs_abs_xyxy_curr)\n                pair_wise_ious=(pair_wise_ious_pre+pair_wise_ious_curr)/2\n                cost_class=0\n                bz_out_prob_set=[bz_out_prob_pre,bz_out_prob_curr]\n                bz_tgt_ids_set=[bz_tgt_ids_pre,bz_tgt_ids_curr]\n                # Compute the classification cost.\n                if self.use_focal:\n                    alpha = self.focal_loss_alpha\n                    gamma = self.focal_loss_gamma\n                    for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):\n                        neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())\n                        pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())\n                        cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]\n                elif self.use_fed_loss:\n                    # focal loss degenerates to naive one\n                    for 
bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):\n                        neg_cost_class = (-(1 - bz_out_prob + 1e-8).log())\n                        pos_cost_class = (-(bz_out_prob + 1e-8).log())\n                        cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]\n                else:\n                    for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):\n                        cost_class += -bz_out_prob[:, bz_tgt_ids]\n\n                # Compute the L1 cost between boxes\n                # image_size_out = torch.cat([v[\"image_size_xyxy\"].unsqueeze(0) for v in targets])\n                # image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries, 1).flatten(0, 1)\n                # image_size_tgt = torch.cat([v[\"image_size_xyxy_tgt\"] for v in targets])\n\n                bz_image_size_out_pre = targets[batch_idx]['image_size_xyxy']\n                bz_image_size_tgt_pre = targets[batch_idx]['image_size_xyxy_tgt']\n                bz_image_size_out_curr = targets[batch_idx+bs//2]['image_size_xyxy']\n                bz_image_size_tgt_curr = targets[batch_idx+bs//2]['image_size_xyxy_tgt']\n\n                bz_out_bbox_pre = bz_boxes_pre / bz_image_size_out_pre  # normalize (x1, y1, x2, y2)\n                bz_out_bbox_curr = bz_boxes_curr / bz_image_size_out_curr  # normalize (x1, y1, x2, y2)\n                bz_tgt_bbox_pre = bz_gtboxs_abs_xyxy_pre / bz_image_size_tgt_pre  # normalize (x1, y1, x2, y2)\n                bz_tgt_bbox_curr = bz_gtboxs_abs_xyxy_curr / bz_image_size_tgt_curr  # normalize (x1, y1, x2, y2)\n                cost_bbox_pre = torch.cdist(bz_out_bbox_pre, bz_tgt_bbox_pre, p=1)\n                cost_bbox_curr = torch.cdist(bz_out_bbox_curr, bz_tgt_bbox_curr, p=1)\n\n                cost_giou = -generalized_box_iou(bz_boxes_pre,bz_boxes_curr,bz_gtboxs_abs_xyxy_pre,bz_gtboxs_abs_xyxy_curr)\n\n                # Final cost matrix\n                cost = self.cost_bbox * (cost_bbox_pre+cost_bbox_curr)/2 + self.cost_class * cost_class/2 + self.cost_giou * cost_giou + 100.0 * (~is_in_boxes_and_center)\n                assert not torch.any(torch.isnan(cost)),\"Error nan value occurs\"\n                # cost = (cost_class + 3.0 * cost_giou + 100.0 * (~is_in_boxes_and_center))  # [num_query,num_gt]\n                cost[~fg_mask] = cost[~fg_mask] + 10000.0\n\n                # if bz_gtboxs.shape[0]>0:\n                indices_batchi, matched_qidx = self.dynamic_k_matching(cost, pair_wise_ious, bz_gtboxs_pre.shape[0])\n\n                indices.append(indices_batchi)\n                matched_ids.append(matched_qidx)\n\n        return indices, matched_ids\n\n    def get_in_boxes_info(self, boxes, target_gts, expanded_strides):\n        xy_target_gts = box_cxcywh_to_xyxy(target_gts)  # (x1, y1, x2, y2)\n\n        anchor_center_x = boxes[:, 0].unsqueeze(1)\n        anchor_center_y = boxes[:, 1].unsqueeze(1)\n\n        # whether the center of each anchor is inside a gt box\n        b_l = anchor_center_x > xy_target_gts[:, 0].unsqueeze(0)\n        b_r = anchor_center_x < xy_target_gts[:, 2].unsqueeze(0)\n        b_t = anchor_center_y > xy_target_gts[:, 1].unsqueeze(0)\n        b_b = anchor_center_y < xy_target_gts[:, 3].unsqueeze(0)\n        # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,\n        is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n        is_in_boxes_all = is_in_boxes.sum(1) > 0  # [num_query]\n        # in fixed center\n        center_radius = 2.5\n 
       # Modified to self-adapted sampling --- the center size depends on the size of the gt boxes\n        # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212\n        b_l = anchor_center_x > (target_gts[:, 0] - (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n        b_r = anchor_center_x < (target_gts[:, 0] + (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n        b_t = anchor_center_y > (target_gts[:, 1] - (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n        b_b = anchor_center_y < (target_gts[:, 1] + (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n\n        is_in_centers = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n        is_in_centers_all = is_in_centers.sum(1) > 0\n\n        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all\n        is_in_boxes_and_center = (is_in_boxes & is_in_centers)\n\n        return is_in_boxes_anchor, is_in_boxes_and_center\n\n    def dynamic_k_matching(self, cost, pair_wise_ious, num_gt):\n        matching_matrix = torch.zeros_like(cost)  # [300,num_gt]\n        ious_in_boxes_matrix = pair_wise_ious\n        n_candidate_k = self.ota_k\n\n        # dynamic_k for each gt: the sum of its top n_candidate_k IoUs with the queries, clamped to at least 1\n        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=0)\n        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)\n            matching_matrix[:, gt_idx][pos_idx] = 1.0\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        anchor_matching_gt = matching_matrix.sum(1)\n\n        if (anchor_matching_gt > 1).sum() > 0:\n            _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1)\n            matching_matrix[anchor_matching_gt > 1] *= 0\n            matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1\n\n        while (matching_matrix.sum(0) == 0).any():\n            num_zero_gt = (matching_matrix.sum(0) == 0).sum()\n            matched_query_id = matching_matrix.sum(1) > 0\n            cost[matched_query_id] += 100000.0\n            unmatch_id = torch.nonzero(matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)\n            for gt_idx in unmatch_id:\n                pos_idx = torch.argmin(cost[:, gt_idx])\n                matching_matrix[:, gt_idx][pos_idx] = 1.0\n            anchor_matching_gt = matching_matrix.sum(1)  # recompute: the pre-loop value is stale once new matches are added\n            if (anchor_matching_gt > 1).sum() > 0:  # If a query matches more than one gt\n                _, cost_argmin = torch.min(cost[anchor_matching_gt > 1],\n                                           dim=1)  # find gt for these queries with minimal cost\n                matching_matrix[anchor_matching_gt > 1] *= 0  # reset mapping relationship\n                matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1  # keep gt with minimal cost\n\n        assert not (matching_matrix.sum(0) == 0).any()\n        selected_query = matching_matrix.sum(1) > 0\n        gt_indices = matching_matrix[selected_query].max(1)[1]\n        assert selected_query.sum() == len(gt_indices)\n\n        cost[matching_matrix == 0] = cost[matching_matrix == 0] + float('inf')\n        matched_query_id = torch.min(cost, dim=0)[1]\n\n        return (selected_query, gt_indices), matched_query_id\n"
  },
  {
    "path": "diffusion/models/diffusion_models.py",
    "content": "import copy\nimport math\n\nimport numpy as np\nimport torch\nfrom torch import einsum, nn\nimport torch.nn.functional as F\n\nfrom einops import rearrange, repeat\nfrom einops_exts import rearrange_many\n\n\n\ndef exists(val):\n    return val is not None\nfrom detectron2.modeling.poolers import ROIPooler\nfrom detectron2.structures import Boxes\n\n\n_DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16)\n\n\n\nclass SinusoidalPositionEmbeddings(nn.Module):\n    def __init__(self, dim):\n        super().__init__()\n        self.dim = dim\n\n    def forward(self, time):\n        device = time.device\n        half_dim = self.dim // 2\n        embeddings = math.log(10000) / (half_dim - 1)\n        embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)\n        embeddings = time[:, None] * embeddings[None, :]\n        embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)\n        return embeddings\n\n\nclass GaussianFourierProjection(nn.Module):\n    \"\"\"Gaussian random features for encoding time steps.\"\"\"\n\n    def __init__(self, embed_dim, scale=30.):\n        super().__init__()\n        # Randomly sample weights during initialization. These weights are fixed\n        # during optimization and are not trainable.\n        self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)\n\n    def forward(self, x):\n        x_proj = x[:, None] * self.W[None, :] * 2 * np.pi\n        return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)\n\n\nclass Dense(nn.Module):\n    \"\"\"A fully connected layer that reshapes outputs to feature maps.\"\"\"\n\n    def __init__(self, input_dim, output_dim):\n        super().__init__()\n        self.dense = nn.Linear(input_dim, output_dim)\n\n    def forward(self, x):\n        return self.dense(x)\n\n\nclass DynamicHead(nn.Module):\n\n    def __init__(self,\n                num_classes,\n                d_model,\n                pooler_resolution,\n                strides,\n                in_channels,\n                dim_feedforward = 2048,\n                nhead = 8,\n                dropout = 0.0,\n                activation = \"relu\",\n                num_heads = 6,\n                return_intermediate=True,\n                use_focal=False,\n                use_fed_loss=False,\n                prior_prob=0.01\n                ):\n        super().__init__()\n\n        # Build RoI.\n        box_pooler = self._init_box_pooler(pooler_resolution,strides,in_channels)\n        self.box_pooler = box_pooler\n        \n        # Build heads.\n        rcnn_head = RCNNHead(d_model, num_classes,pooler_resolution, dim_feedforward, nhead, dropout, activation,use_focal=use_focal,use_fed_loss=use_fed_loss)\n        self.head_series = _get_clones(rcnn_head, num_heads)\n        self.num_heads = num_heads\n        self.return_intermediate = return_intermediate\n\n        # Gaussian random feature embedding layer for time\n        self.d_model = d_model\n        time_dim = d_model * 4\n        self.time_mlp = nn.Sequential(\n            SinusoidalPositionEmbeddings(d_model),\n            nn.Linear(d_model, time_dim),\n            nn.GELU(),\n            nn.Linear(time_dim, time_dim),\n        )\n\n        # Init parameters.\n        self.use_focal = use_focal\n        self.use_fed_loss = use_fed_loss\n        self.num_classes = num_classes\n        if self.use_focal or self.use_fed_loss:\n            self.bias_value = -math.log((1 - prior_prob) / prior_prob)\n        self._reset_parameters()\n\n    
def _reset_parameters(self):\n        # init all parameters.\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n            # initialize the bias for focal loss and fed loss.\n            if self.use_focal or self.use_fed_loss:\n                if p.shape[-1] == self.num_classes or p.shape[-1] == self.num_classes + 1:\n                    nn.init.constant_(p, self.bias_value)\n\n    @staticmethod\n    def _init_box_pooler(pooler_resolution,strides,in_channels):\n\n        pooler_scales = [1/s for s in strides]\n        sampling_ratio = 2\n        pooler_type = \"ROIAlignV2\"\n\n        # If StandardROIHeads is applied on multiple feature maps (as in FPN),\n        # then we share the same predictors and therefore the channel counts must be the same\n        # Check all channel counts are equal\n        assert len(set(in_channels)) == 1, in_channels\n\n        box_pooler = ROIPooler(\n            output_size=pooler_resolution,\n            scales=pooler_scales,\n            sampling_ratio=sampling_ratio,\n            pooler_type=pooler_type,\n        )\n        return box_pooler\n\n    def forward(self,features,init_bboxes,t,lost_features=None,fix_ref_boxes=False):\n        # assert t shape (batch_size)\n        time = self.time_mlp(t)\n\n        inter_class_logits = []\n        inter_pred_bboxes = []\n        inter_association_scores=[]\n\n        bboxes = init_bboxes\n        proposal_features = None\n        \n        for head_idx, rcnn_head in enumerate(self.head_series):\n            class_logits, pred_bboxes, proposal_features ,association_score_logits= rcnn_head(features, bboxes, proposal_features,self.box_pooler,time,lost_features,fix_ref_boxes)\n            if self.return_intermediate:\n                inter_class_logits.append(torch.cat(class_logits,dim=0))\n                inter_pred_bboxes.append(torch.cat(pred_bboxes,dim=0))\n                inter_association_scores.append(torch.sigmoid(association_score_logits))\n            bboxes = (pred_bbox.detach() for pred_bbox in pred_bboxes)\n\n        if self.return_intermediate:\n            return torch.stack(inter_class_logits), torch.stack(inter_pred_bboxes),torch.stack(inter_association_scores)\n\n        return torch.cat(class_logits,dim=0)[None],torch.cat(pred_bboxes,dim=0)[None],torch.sigmoid(association_score_logits)[None]\n\n\nclass RCNNHead(nn.Module):\n\n    def __init__(self,d_model, num_classes, pooler_resolution,dim_feedforward=2048, nhead=8, dropout=0.1, activation=\"relu\",\n                 scale_clamp: float = _DEFAULT_SCALE_CLAMP, bbox_weights=(2.0, 2.0, 1.0, 1.0),use_focal=False,use_fed_loss=False):\n        super().__init__()\n\n        self.d_model = d_model\n\n        # dynamic.\n        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout,batch_first=True)\n        # self.self_attn = FlashSelfAttention(d_model, nhead, attn_drop=dropout)\n        # self.self_attn = WindowAttention(d_model,(8,8),nhead,attn_drop=dropout)\n        # self.cross_attn = nn.MultiheadAttention(d_model,nhead,dropout=dropout)\n        # self.stf=STF(dim=d_model)\n        self.stf=SFT(d_model,pooler_resolution=pooler_resolution)\n\n \n        self.linear1 = nn.Linear(d_model, dim_feedforward)\n        self.dropout = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n        self.norm1 = nn.LayerNorm(d_model)\n        self.norm2 = nn.LayerNorm(d_model)\n        self.norm3 = nn.LayerNorm(d_model)\n        # self.norm4 = 
nn.LayerNorm(d_model)\n        self.dropout1 = nn.Dropout(dropout)\n        self.dropout2 = nn.Dropout(dropout)\n        self.dropout3 = nn.Dropout(dropout)\n        # self.dropout4 = nn.Dropout(dropout)\n\n        self.activation = _get_activation_fn(activation)\n\n        # block time mlp\n        self.block_time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(d_model * 4, d_model * 2))\n\n        # cls.\n        num_cls = 1\n        cls_module = list()\n        for _ in range(num_cls):\n            cls_module.append(nn.Linear(d_model, d_model,False))\n            cls_module.append(nn.LayerNorm(d_model))\n            cls_module.append(nn.ReLU(inplace=True))\n        self.cls_module = nn.ModuleList(cls_module)\n\n        # association score.\n        num_score = 1\n        score_module = list()\n        for _ in range(num_score):\n            score_module.append(nn.Linear(2*d_model, d_model,False))\n            score_module.append(nn.LayerNorm(d_model))\n            score_module.append(nn.ReLU(inplace=True))\n        self.score_module = nn.ModuleList(score_module)\n\n\n        # reg.\n        num_reg = 3\n        reg_module = list()\n        for _ in range(num_reg):\n            reg_module.append(nn.Linear(d_model, d_model,True))\n            reg_module.append(nn.LayerNorm(d_model))\n            reg_module.append(nn.ReLU(inplace=True))\n        self.reg_module = nn.ModuleList(reg_module)\n        \n        # pred.\n        self.use_focal = use_focal\n        self.use_fed_loss = use_fed_loss\n        if self.use_focal or self.use_fed_loss:\n            self.class_logits = nn.Linear(d_model, num_classes)\n        else:\n            self.class_logits = nn.Linear(d_model, num_classes + 1)\n        self.score_logits=nn.Linear(d_model,1)\n        self.bboxes_delta = nn.Linear(d_model, 4)\n        self.scale_clamp = scale_clamp\n        self.bbox_weights = bbox_weights\n        nn.init.constant_(self.class_logits.bias,-math.log((1 - 1e-2) / 1e-2))\n        nn.init.constant_(self.bboxes_delta.bias,-math.log((1 - 1e-2) / 1e-2))\n        for sub_module in self.reg_module:\n            if isinstance(sub_module,nn.Linear):\n                nn.init.constant_(sub_module.bias,-math.log((1 - 1e-2) / 1e-2))\n\n    def forward(self, features,bboxes,pro_features,pooler,time_emb,lost_features=None,fix_ref_boxes=False):\n        \"\"\"\n        :param bboxes: (N, nr_boxes, 4)\n        :param pro_features: (N, nr_boxes, d_model)\n        \"\"\"\n        \n        if pro_features is not None:\n            # pro_features_pre,pro_features_curr=pro_features\n            pro_features_x=pro_features\n        else:\n            pro_features_x=None\n        \n        bboxes_pre,bboxes_cur=bboxes\n        \n        N, nr_boxes = bboxes_pre.shape[:2]\n        # rnd_idx = torch.randperm(nr_boxes)\n        # bboxes_pre=bboxes_pre[:,rnd_idx,:]\n        # bboxes_cur=bboxes_cur[:,rnd_idx,:]\n        # roi_feature.\n        proposal_boxes_pre = list()\n        proposal_boxes_curr = list()\n        for b in range(N):\n            proposal_boxes_pre.append(Boxes(bboxes_pre[b]))\n            proposal_boxes_curr.append(Boxes(bboxes_cur[b]))\n\n        roi_features_pre = pooler(features[0], proposal_boxes_pre)\n        if lost_features is not None:\n            roi_features_pre[roi_features_pre.shape[0]-lost_features.shape[0]:]=lost_features\n        roi_features_curr = pooler(features[1], proposal_boxes_curr)\n\n        if pro_features_x is None:\n            pro_features_pre = roi_features_pre.view(N, nr_boxes, self.d_model, 
-1).mean(-1)\n            pro_features_curr=roi_features_curr.view(N, nr_boxes, self.d_model, -1).mean(-1)\n            pro_features_x=torch.cat([pro_features_pre,pro_features_curr],dim=0)\n        # else:\n        #      pro_features_pre=pro_features_pre.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:]\n        #      pro_features_curr=pro_features_curr.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:]\n        roi_features_pre = roi_features_pre.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2)\n        roi_features_curr = roi_features_curr.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2)\n\n        roi_features_x=torch.cat([torch.cat([roi_features_pre,roi_features_curr],dim=-2).unsqueeze(2),\n        torch.cat([roi_features_curr,roi_features_pre],dim=-2).unsqueeze(2)],dim=2)\n\n        # self_att.\n        pro_features_x = pro_features_x.view(2*N, nr_boxes, self.d_model)\n        # pro_features_pre =pro_features_pre+ self.dropout1(self.self_attn(pro_features_pre, pro_features_pre, pro_features_pre,20,25))\n        pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x, pro_features_x, value=pro_features_x)[0])\n        # pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x))\n        pro_features_x = self.norm1(pro_features_x)\n\n        # pro_features_curr = pro_features_curr.view(N, nr_boxes, self.d_model).permute(1, 0, 2)\n        # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,value=pro_features_curr)[0])\n        # # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,pro_features_curr,20,25))\n        # pro_features_curr = self.norm1(pro_features_curr)\n\n        # cross_interact\n        # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)\n        # pro_features_pre =pro_features_pre+self.dropout2(self.cross_interact(pro_features_pre, roi_features_curr))\n        # pro_features_pre = self.norm2(pro_features_pre)\n\n        # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)\n        # pro_features_curr =pro_features_curr+self.dropout2(self.cross_interact(pro_features_curr, roi_features_pre))\n        # pro_features_curr = self.norm2(pro_features_curr)\n\n        pro_features_x=torch.cat([x.unsqueeze(2) for x in pro_features_x.split(N,dim=0)],dim=-2)\n\n        pro_features_x=pro_features_x+self.dropout2(self.stf(roi_features_x,pro_features_x))\n        pro_features_x = self.norm2(pro_features_x)\n\n        # roi_features_x=torch.cat([roi_features_curr.unsqueeze(2),roi_features_pre.unsqueeze(2)],dim=-2)\n        # pro_features_x=pro_features_x+self.dropout4(self.stf2(roi_features_x,pro_features_x))\n        # pro_features_x = self.norm4(pro_features_x)\n\n        pro_features_x=torch.cat([x.squeeze(2) for x in pro_features_x.split(1,dim=-2)],dim=0).reshape(2*N*nr_boxes,-1)\n\n\n        # inst_interact.\n        # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)\n        # pro_features_pre =pro_features_pre+self.dropout3(self.inst_interact(pro_features_pre, roi_features_pre))\n        # obj_features_pre = self.norm3(pro_features_pre)\n\n        # # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)\n        # pro_features_curr 
=pro_features_curr+self.dropout3(self.inst_interact(pro_features_curr, roi_features_curr))\n        # obj_features_curr = self.norm3(pro_features_curr)\n\n        # obj_feature.\n        obj_features_tmp =self.linear2(self.dropout(self.activation(self.linear1(pro_features_x))))\n        obj_features=pro_features_x+self.dropout3(obj_features_tmp)\n        obj_features= self.norm3(obj_features)\n\n        # obj_features_curr_tmp =self.linear2(self.dropout(self.activation(self.linear1(obj_features_curr))))\n        # obj_features_curr=obj_features_curr+self.dropout4(obj_features_curr_tmp)\n        # obj_features_curr = self.norm4(obj_features_curr)\n        \n        # fc_feature_pre = obj_features_pre.transpose(0, 1).reshape(N * nr_boxes, -1)\n        # fc_feature_curr = obj_features_curr.transpose(0, 1).reshape(N * nr_boxes, -1)\n\n        # all_features=[fc_feature_pre,fc_feature_curr]\n\n        # all_features=[]\n        # for fc_feature,fc_time_emb in zip([fc_feature_pre,fc_feature_curr],time_emb.split(N,dim=0)):\n        scale_shift = self.block_time_mlp(time_emb)\n        scale_shift = torch.repeat_interleave(scale_shift, nr_boxes, dim=0)\n        scale, shift = scale_shift.chunk(2, dim=1)\n        fc_feature = obj_features * (scale + 1) + shift\n        # all_features.append(fc_feature)\n        \n\n        cls_feature= fc_feature.clone()\n        reg_feature= fc_feature.clone()\n        score_feature= torch.cat(fc_feature.clone().split(N*nr_boxes,dim=0),dim=-1)\n\n        for cls_layer in self.cls_module:\n            cls_feature= cls_layer(cls_feature)\n\n        for score_layer in self.score_module:\n            score_feature=score_layer(score_feature)\n    \n        for reg_layer in self.reg_module:\n            reg_feature= reg_layer(reg_feature)     \n        \n        class_logits = self.class_logits(cls_feature)\n        bboxes_deltas= self.bboxes_delta(reg_feature)\n\n        class_logits_pre,class_logits_curr=class_logits.split(N*nr_boxes,dim=0)\n        bboxes_deltas_pre,bboxes_deltas_curr=bboxes_deltas.split(N*nr_boxes,dim=0)\n\n        association_score=self.score_logits(score_feature)\n\n        pred_bboxes_pre = self.apply_deltas(bboxes_deltas_pre, bboxes_pre.view(-1, 4))\n        if fix_ref_boxes:\n            assert not self.training,\"fix reference bboxes only for inference mode\"\n            pred_bboxes_pre[:nr_boxes]=bboxes_pre[0,:nr_boxes]\n        pred_bboxes_curr = self.apply_deltas(bboxes_deltas_curr, bboxes_cur.view(-1, 4))\n            \n        return (class_logits_pre.view(N, nr_boxes, -1),class_logits_curr.view(N, nr_boxes, -1)), (pred_bboxes_pre.view(N, nr_boxes, -1),pred_bboxes_curr.view(N, nr_boxes, -1)),obj_features,association_score.view(N, nr_boxes, -1)\n\n    def apply_deltas(self, deltas, boxes):\n        \"\"\"\n        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\n\n        Args:\n            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\n                deltas[i] represents k potentially different class-specific\n                box transformations for the single box boxes[i].\n            boxes (Tensor): boxes to transform, of shape (N, 4)\n        \"\"\"\n        boxes = boxes.to(deltas.dtype)\n\n        widths = boxes[:, 2] - boxes[:, 0]\n        heights = boxes[:, 3] - boxes[:, 1]\n        ctr_x = boxes[:, 0] + 0.5 * widths\n        ctr_y = boxes[:, 1] + 0.5 * heights\n\n        wx, wy, ww, wh = self.bbox_weights\n        dx = deltas[:, 0::4] / wx\n        dy = deltas[:, 1::4] / wy\n        dw = 
deltas[:, 2::4] / ww\n        dh = deltas[:, 3::4] / wh\n\n        # Prevent sending too large values into torch.exp()\n        dw = torch.clamp(dw, max=self.scale_clamp)\n        dh = torch.clamp(dh, max=self.scale_clamp)\n\n        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\n        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\n        pred_w = torch.exp(dw) * widths[:, None]\n        pred_h = torch.exp(dh) * heights[:, None]\n\n        pred_boxes = torch.zeros_like(deltas)\n        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1\n        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1\n        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2\n        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2\n\n        return pred_boxes\n\n\nclass SFT(nn.Module):\n\n    def __init__(self, hidden_dim, pooler_resolution,dim_dynamic=2*64,num_dynamic=2):\n        super().__init__()\n\n        self.hidden_dim = hidden_dim\n        self.dim_dynamic = dim_dynamic\n        self.num_dynamic = num_dynamic\n        self.pooler_resolution= pooler_resolution\n        self.num_params = self.hidden_dim * self.dim_dynamic\n        self.dynamic_layer = nn.Linear(self.hidden_dim, self.num_dynamic * self.num_params)\n\n        self.norm1 = nn.LayerNorm(self.dim_dynamic)\n        self.norm2 = nn.LayerNorm(self.hidden_dim)\n\n        self.activation = nn.ReLU(inplace=True)\n\n        num_output = 2*self.hidden_dim * self.pooler_resolution ** 2\n        self.num_output= 2*self.pooler_resolution ** 2\n        self.out_layer = nn.Linear(num_output, self.hidden_dim)\n        self.norm3 = nn.LayerNorm(self.hidden_dim)\n\n    def forward(self,roi_features,pro_features):\n        '''\n        pro_features: ( N,nr_boxes,2,self.d_model)\n        roi_features: ( N,nr_boxes,2,49*2,self.d_model)\n        '''\n        N=pro_features.shape[0]\n        # features=torch.cat([x.unsqueeze(2) for x in roi_features.split(self.num_output,dim=-2)],dim=2).reshape(-1,self.num_output,self.hidden_dim)\n        features = roi_features.reshape(-1,self.num_output,self.hidden_dim)\n        parameters = self.dynamic_layer(pro_features)\n\n        param1 = parameters[:, :, :,:self.num_params].reshape(-1, self.hidden_dim, self.dim_dynamic)\n        param2 = parameters[:, :, :,self.num_params:].reshape(-1, self.dim_dynamic, self.hidden_dim)\n\n        features = torch.bmm(features, param1)\n        features = self.norm1(features)\n        features = self.activation(features) \n\n        features = torch.bmm(features, param2)\n        features = self.norm2(features)\n        features = self.activation(features)\n\n        features = features.flatten(1)\n        features = self.out_layer(features)\n        features = self.norm3(features)\n        features = self.activation(features)\n\n        return features.reshape(N,-1,2,self.hidden_dim)\n    \n\nclass PerceiverAttention(nn.Module):\n    def __init__(self, *, dim, dim_head=64, heads=8):\n        super().__init__()\n        self.scale = dim_head**-0.5\n        self.heads = heads\n        inner_dim = dim_head * heads\n\n        self.norm_media = nn.LayerNorm(dim)\n        self.norm_latents = nn.LayerNorm(dim)\n\n        self.to_q = nn.Linear(dim, inner_dim, bias=False)\n        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)\n        self.to_out = nn.Linear(inner_dim, dim, bias=False)\n\n    def forward(self, x, latents):\n        \"\"\"\n        Args:\n            x (torch.Tensor): image features\n                shape (b, T, n1, D)\n            latent 
(torch.Tensor): latent features\n                shape (b, T, n2, D)\n        \"\"\"\n        x = self.norm_media(x)\n        latents = self.norm_latents(latents)\n\n        h = self.heads\n\n        q = self.to_q(latents)\n        kv_input = torch.cat((x, latents), dim=-2)\n        k, v = self.to_kv(kv_input).chunk(2, dim=-1)\n        q, k, v = rearrange_many((q, k, v), \"b t n (h d) -> b h t n d\", h=h)\n        q = q * self.scale\n\n        # attention\n        sim = einsum(\"... i d, ... j d  -> ... i j\", q, k)\n        sim = sim - sim.amax(dim=-1, keepdim=True).detach()\n        attn = sim.softmax(dim=-1)\n\n        out = einsum(\"... i j, ... j d -> ... i d\", attn, v)\n        out = rearrange(out, \"b h t n d -> b t n (h d)\", h=h)\n        return self.to_out(out)\n    \n\ndef FeedForward(dim, mult=4):\n    inner_dim = int(dim * mult)\n    return nn.Sequential(\n        nn.LayerNorm(dim),\n        nn.Linear(dim, inner_dim, bias=False),\n        nn.GELU(),\n        nn.Linear(inner_dim, dim, bias=False),\n    )\n\n\n# class STF(nn.Module):\n#     def __init__(\n#         self,\n#         *,\n#         dim,\n#         depth=2,\n#         dim_head=64,\n#         heads=8,\n#         ff_mult=4,\n#     ):\n#         super().__init__()\n#         # self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n#         self.layers = nn.ModuleList([])\n#         for _ in range(depth):\n#             self.layers.append(\n#                 nn.ModuleList(\n#                     [\n#                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),\n#                         FeedForward(dim=dim, mult=ff_mult),\n#                     ]\n#                 )\n#             )\n\n#         self.norm = nn.LayerNorm(dim)\n\n#     def forward(self,roi_features,pro_features):\n#         '''\n#         pro_features: ( N,nr_boxes,2,self.d_model)\n#         roi_features: ( N,nr_boxes,2,49*2,self.d_model)\n#         '''\n#         b,n,x,dim=pro_features.shape\n\n#         # blocks\n#         latents=pro_features.reshape(b,n*x,1,-1)\n#         roi_features=roi_features.reshape(b,n*x,-1,dim)\n#         for attn, ff in self.layers:\n#             latents = attn(roi_features, latents) + latents\n#             latents = ff(latents) + latents\n\n#         return self.norm(latents).reshape(b,n,x,dim)\n        \n\nclass WindowAttention(nn.Module):\n    \"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n    It supports both of shifted and non-shifted window.\n    Args:\n        dim (int): Number of input channels.\n        window_size (tuple[int]): The height and width of the window.\n        num_heads (int): Number of attention heads.\n        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n        proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n    \"\"\"\n\n    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n        super().__init__()\n        self.dim = dim\n        self.window_size = window_size  # Wh, Ww\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n\n        self.to_q = nn.Linear(dim, dim, bias=qkv_bias)\n        self.to_k = nn.Linear(dim, dim, bias=qkv_bias)\n        self.to_v = nn.Linear(dim, dim, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        # trunc_normal_(self.relative_position_bias_table, std=.02)\n        self.softmax = nn.Softmax(dim=-1)\n\n    def forward(self,q,k,v,H,W):\n        \"\"\" Forward function.\n        Args:\n            x: input features with shape of (num_windows*B, N, C)\n            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n        \"\"\"\n        B_, N, C = q.shape\n        assert N==k.shape[1] and N==v.shape[1],\"query,key and value must have equal length\"\n        pad_l = pad_t = 0\n        pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]\n        pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]\n        Hp, Wp=0,0\n        def mode_charge(x):\n            x = x.reshape(B_, H, W, C)\n\n            x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n            _, Hp, Wp, _ = x.shape\n\n            x = window_partition(x, self.window_size[0])  # nW*B, window_size, window_size, C\n            x = x.view(-1, self.window_size[1] * self.window_size[0], C)  # nW*B, window_size*window_size, C\n            return x,Hp,Wp\n        (q,Hp,Wp),(k,_,_),(v,_,_)=mode_charge(q),mode_charge(k),mode_charge(v)\n        B_w = q.shape[0]\n        N_w = q.shape[1]\n        q= self.to_q(q).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n        k= self.to_k(k).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n        v= self.to_v(v).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]\n\n        q = q * self.scale\n        attn = (q @ k.transpose(-2, -1))\n\n        attn = self.softmax(attn)\n\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B_w, N_w, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        x = x.view(-1, self.window_size[1], self.window_size[0], C)\n        x = window_reverse(x, self.window_size[0], Hp, Wp)  # B H' W' C\n \n        if pad_r > 0 or pad_b > 0:\n            x = x[:, :H, :W, :].contiguous()\n\n        x = x.view(B_, H * W, C)\n\n        return x\n    \n\n    \ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef _get_activation_fn(activation):\n    \"\"\"Return an activation function given a string\"\"\"\n    if activation == \"relu\":\n        return F.relu\n    if activation == \"gelu\":\n        return F.gelu\n    if activation == \"glu\":\n        return F.glu\n    raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\ndef window_partition(x, window_size):\n    \"\"\"\n    Args:\n        x: (B, H, W, C)\n        window_size (int): window size\n    Returns:\n        windows: (num_windows*B, window_size, window_size, C)\n    \"\"\"\n    B, H, W, C = x.shape\n    x = x.view(B, H // window_size, window_size, W 
// window_size, window_size, C)\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n    \"\"\"\n    Args:\n        windows: (num_windows*B, window_size, window_size, C)\n        window_size (int): Window size\n        H (int): Height of image\n        W (int): Width of image\n    Returns:\n        x: (B, H, W, C)\n    \"\"\"\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n    return x\n\n# from flash_attn import flash_attn_qkvpacked_func, flash_attn_func\n# class FlashSelfAttention(nn.Module):\n\n#     def __init__(self, dim,num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n#         super().__init__()\n#         self.dim = dim\n#         self.num_heads = num_heads\n#         head_dim = dim // num_heads\n#         self.scale = qk_scale or head_dim ** -0.5\n\n#         # self.in_proj = nn.Linear(dim, 3*dim, bias=qkv_bias)\n#         self.in_proj_weight = nn.Parameter(torch.empty((3 * dim,dim)))\n#         if qkv_bias:\n#             self.in_proj_bias = nn.Parameter(torch.empty(3 * dim))\n#         else:\n#             self.register_parameter('in_proj_bias', None)\n#         self.attn_drop = nn.Dropout(attn_drop)\n#         self.out_proj = nn.Linear(dim, dim)\n#         self.proj_drop = nn.Dropout(proj_drop)\n\n\n#     def forward(self,x):\n#         \"\"\"\n#         x: B,N,C\n#         \"\"\"\n#         B_, N, C = x.shape\n#         qkv=F.linear(x, self.in_proj_weight , self.in_proj_bias).reshape(B_,N,3,self.num_heads,-1)\n#         x=flash_attn_qkvpacked_func(qkv,self.attn_drop.p if self.training else 0.0,softmax_scale=self.scale).reshape(B_,N,-1)\n#         x=self.out_proj(x)\n#         x=self.proj_drop(x)\n#         return x\n"
  },
  {
    "path": "diffusion/models/diffusionnet.py",
    "content": "import math\nimport random\nfrom typing import List\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom yolox.models.yolo_pafpn import YOLOPAFPN\nfrom .diffusion_head import DiffusionHead\nfrom yolox.models.network_blocks import BaseConv\n\nclass DiffusionNet(nn.Module):\n    \"\"\"\n    Implement DiffusionNet\n    \"\"\"\n\n    def __init__(self, backbone=None, head=None, act=\"silu\"):\n        super().__init__()\n        self.backbone=backbone\n        self.head=head\n        self.projs=nn.ModuleList()\n        in_channels=backbone.in_channels\n        for i in range(len(in_channels)):\n            self.projs.append(\n                BaseConv(\n                    in_channels=int(in_channels[i] * head.width),\n                    out_channels=int(head.hidden_dim),\n                    ksize=1,\n                    stride=1,\n                    act=act,\n                ))\n\n    def forward(self, x, targets=(None,None),random_flip=False,input_size=None):\n        # fpn output content features of [dark3, dark4, dark5]\n        # x format (pre_imgs,cur_imgs) (B,C,H,W)\n        # targets format (pre_targets,cur_targets) (B,N,5) class cx cy w h\n        pre_imgs,cur_imgs=x\n        pre_targets,cur_targets=targets\n        mate_info=(pre_imgs.shape,pre_imgs.device,pre_imgs.dtype)\n        bs,_,_,_=mate_info[0]\n        if cur_imgs is None:\n            x_input=pre_imgs\n        else:\n            x_input=torch.cat([pre_imgs,cur_imgs],dim=0)\n\n        fpn_outs = self.backbone(x_input)\n        flip_mode=False\n        if random_flip and torch.randn((1,1))[0]>0.5:\n            flip_mode=True\n        pre_features,cur_features=[],[]\n        \n        for proj,x_out in zip(self.projs,fpn_outs):\n            l_feat=proj(x_out)\n            if cur_imgs is None:\n                pre_features.append(l_feat)\n                if flip_mode:\n                    cur_features.append(torch.flip(l_feat,dims=[3]))\n                else:\n                    cur_features.append(l_feat.clone())\n            else:\n                pre_l_feat,cur_l_feat=l_feat.split(bs,dim=0)\n                pre_features.append(pre_l_feat)\n                cur_features.append(cur_l_feat)\n\n        features=(pre_features,cur_features)\n\n        if self.training:\n            assert pre_targets is not None\n            if cur_targets is None:\n                cur_targets=pre_targets.clone()\n                if flip_mode:\n                    nlabels=(cur_targets.sum(-1)>0).sum(-1)\n                    for idx,nlabel in enumerate(nlabels):\n                        cur_targets[idx,:nlabel,1]=input_size[1]-cur_targets[idx,:nlabel,1]\n            loss_dict = self.head(\n                features,mate_info,targets=torch.cat([pre_targets,cur_targets],dim=0))\n            if 'total_loss' not in loss_dict:\n                loss_dict['total_loss']=sum(loss_dict.values())\n            outputs=loss_dict\n            return outputs\n        else:  \n            outputs = self.head(features,mate_info,targets=pre_targets)\n\n        return outputs\n\n\n"
  },
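  {
    "path": "docs/examples/diffusionnet_forward_sketch.py",
    "content": "# Hypothetical usage sketch for DiffusionNet (diffusion/models/diffusionnet.py).\n# It only illustrates the forward contract: x is a (pre_imgs, cur_imgs) pair of\n# (B,C,H,W) tensors and targets is a (pre_targets, cur_targets) pair of (B,N,5)\n# tensors with rows [class, cx, cy, w, h] padded by zero rows. Whether the head\n# accepts zero-padded dummy targets at eval time is an assumption; in the repo\n# the evaluator supplies real targets from DiffusionValTransform.\nimport torch\n\nfrom yolox.models.yolo_pafpn import YOLOPAFPN\nfrom diffusion.models.diffusionnet import DiffusionNet, DiffusionHead\n\ndef build_model(num_classes=1, depth=1.33, width=1.25):\n    # mirrors get_model() in the exp files, without freezing the backbone\n    backbone = YOLOPAFPN(depth, width, in_channels=[256, 512, 1024])\n    head = DiffusionHead(num_classes, width)\n    return DiffusionNet(backbone, head)\n\nif __name__ == '__main__':\n    model = build_model().eval()\n    h, w = 800, 1440\n    pre = torch.randn(1, 3, h, w)\n    dummy_targets = torch.zeros(1, 500, 5)  # all-zero rows are padding\n    with torch.no_grad():\n        # cur_imgs is None, so forward duplicates (optionally flips) the\n        # previous-frame features to stand in for the current frame\n        outputs = model((pre, None), targets=(dummy_targets, None), input_size=(h, w))\n"
  },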
  {
    "path": "exps/default/nano.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\nimport torch.nn as nn\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 0.33\n        self.width = 0.25\n        self.scale = (0.5, 1.5)\n        self.random_size = (10, 20)\n        self.test_size = (416, 416)\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.enable_mixup = False\n\n    def get_model(self, sublinear=False):\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n        if \"model\" not in self.__dict__:\n            from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead\n            in_channels = [256, 512, 1024]\n            # NANO model use depthwise = True, which is main difference.\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, depthwise=True)\n            head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, depthwise=True)\n            self.model = YOLOX(backbone, head)\n\n        self.model.apply(init_yolo)\n        self.model.head.initialize_biases(1e-2)\n        return self.model\n"
  },
  {
    "path": "exps/default/yolov3.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\nimport torch\nimport torch.nn as nn\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 1.0\n        self.width = 1.0\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n\n    def get_model(self, sublinear=False):\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n        if \"model\" not in self.__dict__:\n            from yolox.models import YOLOX, YOLOFPN, YOLOXHead\n            backbone = YOLOFPN()\n            head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act=\"lrelu\")\n            self.model = YOLOX(backbone, head)\n        self.model.apply(init_yolo)\n        self.model.head.initialize_biases(1e-2)\n\n        return self.model\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from data.datasets.cocodataset import COCODataset\n        from data.datasets.mosaicdetection import MosaicDetection\n        from data.datasets.data_augment import TrainTransform\n        from data.datasets.dataloading import YoloBatchSampler, DataLoader, InfiniteSampler\n        import torch.distributed as dist\n\n        dataset = COCODataset(\n                data_dir='data/COCO/',\n                json_file=self.train_ann,\n                img_size=self.input_size,\n                preproc=TrainTransform(\n                    rgb_means=(0.485, 0.456, 0.406),\n                    std=(0.229, 0.224, 0.225),\n                    max_labels=50\n                ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=120\n            ),\n            degrees=self.degrees,\n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)\n        else:\n            sampler = torch.utils.data.RandomSampler(self.dataset)\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n"
  },
  {
    "path": "exps/default/yolox_l.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 1.0\n        self.width = 1.0\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n"
  },
  {
    "path": "exps/default/yolox_m.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 0.67\n        self.width = 0.75\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n"
  },
  {
    "path": "exps/default/yolox_s.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 0.33\n        self.width = 0.50\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n"
  },
  {
    "path": "exps/default/yolox_tiny.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 0.33\n        self.width = 0.375\n        self.scale = (0.5, 1.5)\n        self.random_size = (10, 20)\n        self.test_size = (416, 416)\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.enable_mixup = False\n"
  },
  {
    "path": "exps/default/yolox_x.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.exp import Exp as MyExp\n\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n"
  },
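  {
    "path": "docs/examples/model_scaling.py",
    "content": "# Hypothetical reference sketch (not one of the repo's files): the default exps\n# above differ mainly in their depth/width multipliers. depth scales the number\n# of CSP blocks and width scales channel counts; channels are computed as\n# int(base * width), the same arithmetic DiffusionNet uses for its projection\n# layers. Values below are copied from the exp files in exps/default.\nYOLOX_VARIANTS = {\n    'nano': (0.33, 0.25),\n    'tiny': (0.33, 0.375),\n    's':    (0.33, 0.50),\n    'm':    (0.67, 0.75),\n    'l':    (1.00, 1.00),\n    'x':    (1.33, 1.25),\n}\n\nif __name__ == '__main__':\n    base_channels = [256, 512, 1024]\n    for name, (depth, width) in YOLOX_VARIANTS.items():\n        scaled = [int(c * width) for c in base_channels]\n        print(f'yolox-{name}: depth={depth}, width={width}, fpn channels={scaled}')\n"
  },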
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_dancetrack.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"train.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (18, 32)\n        self.max_epoch = 20\n        self.print_interval = 20\n        self.eval_interval = 40\n        self.no_aug_epochs = 5\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"detection\"\n        self.enable_mixup = True\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n     
           std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
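  {
    "path": "docs/examples/load_exp.py",
    "content": "# Hypothetical usage sketch: the exp files under exps/example/mot configure the\n# model, data loaders, evaluator and optimizer, and are normally consumed\n# through YOLOX's experiment loader. A minimal sketch, assuming the stock\n# yolox.exp.get_exp helper and a YOLOX-style training entry point, e.g.:\n#\n#   python tools/train.py -f exps/example/mot/yolox_x_diffusion_det_dancetrack.py -b 8 --fp16\n#\nfrom yolox.exp import get_exp\n\nif __name__ == '__main__':\n    exp = get_exp('exps/example/mot/yolox_x_diffusion_det_dancetrack.py', None)\n    model = exp.get_model()                      # frozen YOLOPAFPN backbone + DiffusionHead\n    optimizer = exp.get_optimizer(batch_size=8)  # AdamW, lr=2.5e-5, weight_decay=1e-4\n    print(exp.exp_name, exp.input_size, exp.task)\n"
  },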
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot17.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (800, 1440)\n        self.test_size = (800, 1440)\n        self.random_size = (18, 32)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"detection\"\n        self.enable_mixup = True\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mix_det\"),\n            json_file=self.train_ann,\n            name='',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                
std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train_half.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (800, 1440)\n        self.test_size = (800, 1440)\n        self.random_size = (18, 32)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"detection\"\n        self.enable_mixup = True\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mix_mot_ch\"),\n            json_file=self.train_ann,\n            name='',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n         
       std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot20.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (20, 36)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"detection\"\n        self.enable_mixup = True\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mix_mot20_ch\"),\n            json_file=self.train_ann,\n            name='',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"MOT20\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n          
      std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_dancetrack.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"train.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (18, 32)\n        self.max_epoch = 20\n        self.print_interval = 20 \n        self.eval_interval = 40\n        self.no_aug_epochs = 5\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            
preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"test.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (18, 32)\n        self.max_epoch = 20\n        self.print_interval = 20 \n        self.eval_interval = 40\n        self.no_aug_epochs = 5\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"dancetrack\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='test',\n            
preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (800, 1440)\n        self.test_size = (800, 1440)\n        self.random_size = (18, 32)\n        self.max_epoch = 30\n        self.print_interval = 20 \n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n      
          rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train_half.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (800, 1440)\n        self.test_size = (800, 1440)\n        self.random_size = (18, 32)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.enable_mixup = True\n        self.seed=8823\n        self.conf_thresh=0.25\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n           
 preproc=DiffusionValTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17_baseline.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"test.json\"\n        self.input_size = (800, 1440)\n        self.test_size = (800, 1440)\n        self.random_size = (18, 32)\n        self.max_epoch = 30\n        self.print_interval = 20 \n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"mot\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='test',\n            preproc=DiffusionValTransform(\n           
     rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1000, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot20.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (20, 36)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"MOT20\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"MOT20\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n   
             rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot20_baseline.py",
    "content": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch.optim import AdamW\nfrom yolox.exp import Exp as MyExp\nfrom yolox.data import get_yolox_datadir\n\nclass Exp(MyExp):\n    def __init__(self):\n        super(Exp, self).__init__()\n        self.num_classes = 1\n        self.depth = 1.33\n        self.width = 1.25\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n        self.train_ann = \"train.json\"\n        self.val_ann = \"val_half.json\"\n        self.input_size = (896, 1600)\n        self.test_size = (896, 1600)\n        self.random_size = (20, 36)\n        self.max_epoch = 30\n        self.print_interval = 20\n        self.eval_interval = 5\n        self.no_aug_epochs = 10\n        self.basic_lr_per_img = 0.001 / 64.0\n        self.warmup_epochs = 1\n        self.task=\"tracking\"\n        self.seed=8823\n        self.conf_thresh=0.4\n        self.det_thresh=0.7\n        self.nms_thresh2d=0.75\n        self.nms_thresh3d=0.7\n        self.interval=5\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            MOTDataset,\n            TrainTransform,\n            YoloBatchSampler,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            DiffusionMosaicDetection,\n            DiffusionTrainTransform\n        )\n\n        dataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"MOT20\"),\n            json_file=self.train_ann,\n            name='train',\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=500,\n            ),\n        )\n\n        dataset = DiffusionMosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=DiffusionTrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            ),\n            degrees=self.degrees, \n            translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(\n            len(self.dataset), seed=self.seed if self.seed else 0\n        )\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import MOTDataset,DiffusionValTransform\n\n        valdataset = MOTDataset(\n            data_dir=os.path.join(get_yolox_datadir(), \"MOT20\"),\n            json_file=self.val_ann,\n            img_size=self.test_size,\n            name='train',\n            preproc=DiffusionValTransform(\n   
             rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=1200, \n            )\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.conf_thresh,\n            nmsthre3d=self.nms_thresh3d,\n            detthre=self.det_thresh,\n            nmsthre2d=self.nms_thresh2d,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n    \n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n        from diffusion.models.diffusionnet import DiffusionNet,DiffusionHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            for value in backbone.parameters():\n                value.requires_grad=False\n            head=DiffusionHead(self.num_classes,self.width)\n            self.model = DiffusionNet(backbone, head)\n\n        self.model.apply(init_yolo)\n        # self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_optimizer(self, batch_size):\n        lr=2.5e-05\n        weight_decay = 0.0001\n        self.optimizer=AdamW(self.model.parameters(),lr=lr,weight_decay=weight_decay) \n        return self.optimizer\n"
  },
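  {
    "path": "docs/snippets/exp_usage_sketch.py",
    "content": "# Hypothetical snippet, not a file from the original repository: a minimal\n# sketch of how a YOLOX-style experiment file like the ones above is typically\n# consumed. It assumes the package (and its diffusion module) is installed and\n# mirrors the file-loading pattern used by yolox.exp; the path below is only\n# an example.\nimport importlib\nimport os\nimport sys\n\ndef load_exp(exp_file):\n    # import the experiment module from its file path and instantiate Exp\n    sys.path.insert(0, os.path.dirname(exp_file))\n    module = importlib.import_module(os.path.basename(exp_file).split('.')[0])\n    return module.Exp()\n\nif __name__ == '__main__':\n    exp = load_exp('exps/example/mot/yolox_x_diffusion_track_mot20.py')\n    model = exp.get_model()                      # frozen YOLOPAFPN + DiffusionHead\n    optimizer = exp.get_optimizer(batch_size=8)  # AdamW, lr=2.5e-5\n    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n    print(exp.exp_name, 'trainable params:', trainable)\n"
  },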
  {
    "path": "requirements.txt",
    "content": "numpy\ntorch>=1.7\nopencv_python\nloguru\nscikit-image\ntqdm\ntorchvision>=0.10.0\nPillow\nthop\nninja\ntabulate\ntensorboard\nlap\nmotmetrics\nfilterpy\nh5py\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\n# Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved\n\nimport re\nimport setuptools\nimport glob\nfrom os import path\nimport torch\nfrom torch.utils.cpp_extension import CppExtension\n\ntorch_ver = [int(x) for x in torch.__version__.split(\".\")[:2]]\nassert torch_ver >= [1, 3], \"Requires PyTorch >= 1.3\"\n\n\ndef get_extensions():\n    this_dir = path.dirname(path.abspath(__file__))\n    extensions_dir = path.join(this_dir, \"yolox\", \"layers\", \"csrc\")\n\n    main_source = path.join(extensions_dir, \"vision.cpp\")\n    sources = glob.glob(path.join(extensions_dir, \"**\", \"*.cpp\"))\n\n    sources = [main_source] + sources\n    extension = CppExtension\n\n    extra_compile_args = {\"cxx\": [\"-O3\"]}\n    define_macros = []\n\n    include_dirs = [extensions_dir]\n\n    ext_modules = [\n        extension(\n            \"yolox._C\",\n            sources,\n            include_dirs=include_dirs,\n            define_macros=define_macros,\n            extra_compile_args=extra_compile_args,\n        )\n    ]\n\n    return ext_modules\n\n\nwith open(\"yolox/__init__.py\", \"r\") as f:\n    version = re.search(\n        r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n        f.read(), re.MULTILINE\n    ).group(1)\n\n\n# with open(\"README.md\", \"r\") as f:\n#     long_description = f.read()\n\nlong_description=\"sss\"\nsetuptools.setup(\n    name=\"yolox\",\n    version=version,\n    author=\"basedet team\",\n    python_requires=\">=3.6\",\n    long_description=long_description,\n    ext_modules=get_extensions(),\n    classifiers=[\"Programming Language :: Python :: 3\", \"Operating System :: OS Independent\"],\n    cmdclass={\"build_ext\": torch.utils.cpp_extension.BuildExtension},\n    packages=setuptools.find_namespace_packages(),\n)\n"
  },
  {
    "path": "tools/convert_bdd100k_to_coco.py",
    "content": "import cv2\nimport os\nimport json\nimport tqdm\nimport numpy as np\n\nlabels_path = 'datasets/bdd100k/labels'\nimg_path = 'datasets/bdd100k/images'\n# mot_labels_path  = '/data/yourname/BDD100K-MOT/GT'\n\nout_path = 'datasets/bdd100k/annotations/'\n\nsplit = ['train']\ncategories = [\n    {\"id\": 1, \"name\": \"pedestrian\"},\n    {\"id\": 2, \"name\": \"rider\"},\n    {\"id\": 3, \"name\": \"car\"},\n    {\"id\": 4, \"name\": \"truck\"},\n    {\"id\": 5, \"name\": \"bus\"},\n    {\"id\": 6, \"name\": \"train\"},\n    {\"id\": 7, \"name\": \"motorcycle\"},\n    {\"id\": 8, \"name\": \"bicycle\"},\n    # {\"id\": 9, \"name\": \"traffic light\"},\n    # {\"id\": 10, \"name\": \"traffic sign\"},\n]\n\n# \"traffic light\":9, \"traffic sign\":10\ncat = {\"pedestrian\":1, \"rider\":2, \"car\":3, \"truck\":4, \"bus\":5, \"train\":6, \"motorcycle\":7, \"bicycle\":8,}\n# 1: pedestrian\n# 2: rider\n# 3: car\n# 4: truck\n# 5: bus\n# 6: train\n# 7: motorcycle\n# 8: bicycle  \n# 9: traffic light --- Don't need tracking\n# 10: traffic sign  ---   Don't need tracking\n# For MOT and MOTS, only the first 8 classes are used and evaluated\n\ndef read_tid_num_per_video(video_ann_dir):\n    anns = np.loadtxt(video_ann_dir, dtype=np.float32, delimiter=',')\n    max_tid = max(anns[:, 1])\n    return int(max_tid)\n    \n\nfor s in split:\n    img_id = 1; ann_id = 1; video_cnt = 0; \n    tid_cnt = 0 \n    images = []; annotations=[]; videos = []\n    all_video=[d for d in os.listdir(os.path.join(labels_path, s)) if '.json' in d]\n    need_index=np.random.choice(range(len(all_video)),len(all_video)//3,replace=False)\n    video_labels_list = [all_video[i] for i in need_index]\n    \n    for v_label in tqdm.tqdm(video_labels_list):\n        video_cnt += 1\n        video = {'id': video_cnt, 'file_name':v_label[:-5]}\n        videos.append(video)\n        \n        v_lab_path = os.path.join(os.path.join(labels_path, s, v_label))\n        with open(v_lab_path, 'r') as f:\n            annos=json.load(f)# anns per video\n        num_frames  = len(annos)# the number of frames per video\n        sign_cnt = 0\n        for ann in annos:# ann --- 每一帧的标注信息，这里放过了空白帧\n            \n            img_name = os.path.join(img_path, s, ann['videoName'], ann['name'])\n            img=cv2.imread(img_name)\n            h,w,_ = img.shape\n            \n            img_info = {\n            'file_name':img_name,\n            'width':w,\n            'height':h,\n            'id': img_id,\n            'frame_id': ann['frameIndex'] + 1,# 严格按照 数据集 标记的帧indx 来进行排序，这将有利于 判断 相邻帧 之间的关系\n            'prev_image_id': -1 if ann['frameIndex'] == 0 else img_id - 1,\n            'next_image_id': -1 if ann['frameIndex'] == num_frames-1 else img_id + 1,\n            'video_id': video_cnt\n            }# 所有的图像信息images中 ，这里也会添加空白标注帧的图像信息\n            images.append(img_info)\n            \n            for j, lab in enumerate(ann['labels']):\n                #  lab---每一个实例的标注信息  如果遇到空白标注帧--ann['labels']为空 则循环不执行 如果帧为非空 则继续执行此循环\n                if lab['category'] in cat:# 为了避免 'other vehicle' 类\n                    pass\n                else:\n                    continue\n                    \n                track_id = lab['id']\n                     \n                if sign_cnt == 0 and j==0:\n                    firstid = track_id\n                    sign_cnt = 1      \n                     \n                tid_curr = int(track_id) - int(firstid) + 1\n                tid_cnt+=1\n                is_crowd = lab['attributes']['crowd']\n    
            x1, y1, x2, y2=lab['box2d']['x1'], lab['box2d']['y1'], lab['box2d']['x2'], lab['box2d']['y2']\n                \n                annotation = {\n                    'image_id': img_id,\n                    'conf': 1,\n                    'bbox': [x1, y1, x2-x1, y2-y1],\n                    'category_id': cat[lab['category']],\n                    'id': ann_id,\n                    'iscrowd':  1 if is_crowd else 0,\n                    'track_id': tid_curr + tid_cnt,\n                    'segmentation': [],\n                    'area': (x2-x1)*(y2-y1),\n                    'box_id':int(track_id)   \n                }\n                annotations.append(annotation)\n                ann_id += 1\n                    \n            img_id += 1\n            \n        # tid_cnt += read_tid_num_per_video(os.path.join(mot_labels_path, s, v_label[:-5]+'.txt'))\n            \n    dataset_dict = {}\n    dataset_dict[\"images\"] = images\n    dataset_dict[\"annotations\"] = annotations\n    dataset_dict[\"categories\"] = categories\n    dataset_dict[\"videos\"] = videos\n    \n    json_str = json.dumps(dataset_dict)\n    print(f' The number of detection objects is {ann_id - 1}, The number of detection imgs is {img_id -1} .')\n    with open(out_path+f'{s}.json', 'w') as json_file:\n        json_file.write(json_str)"
  },
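  {
    "path": "docs/snippets/bdd100k_label_sketch.py",
    "content": "# Hypothetical snippet, not a file from the original repository: a minimal\n# BDD100K MOT label record of the shape convert_bdd100k_to_coco.py consumes,\n# and the COCO-style bbox it derives from the box2d corners. All values here\n# are made up for illustration.\nlab = {\n    'id': '42',\n    'category': 'car',\n    'attributes': {'crowd': False},\n    'box2d': {'x1': 100.0, 'y1': 200.0, 'x2': 180.0, 'y2': 260.0},\n}\nx1, y1, x2, y2 = (lab['box2d'][k] for k in ('x1', 'y1', 'x2', 'y2'))\nbbox = [x1, y1, x2 - x1, y2 - y1]  # COCO bbox: [x, y, width, height]\nprint(bbox, 'area =', bbox[2] * bbox[3])  # -> [100.0, 200.0, 80.0, 60.0] area = 4800.0\n"
  },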
  {
    "path": "tools/convert_cityperson_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/Cityscapes/'\nDATA_FILE_PATH = 'datasets/data_path/citypersons.train'\nOUT_PATH = DATA_PATH + 'annotations/'\n\ndef load_paths(data_path):\n    with open(data_path, 'r') as file:\n        img_files = file.readlines()\n        img_files = [x.replace('\\n', '') for x in img_files]\n        img_files = list(filter(lambda x: len(x) > 0, img_files))\n    label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files]\n    return img_files, label_files                    \n\nif __name__ == '__main__':\n    if not os.path.exists(OUT_PATH):\n        os.mkdir(OUT_PATH)\n\n    out_path = OUT_PATH + 'train.json'\n    out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}\n    img_paths, label_paths = load_paths(DATA_FILE_PATH)\n    image_cnt = 0\n    ann_cnt = 0\n    video_cnt = 0\n    for img_path, label_path in zip(img_paths, label_paths):\n        image_cnt += 1\n        im = Image.open(os.path.join(\"datasets\", img_path))\n        image_info = {'file_name': img_path, \n                        'id': image_cnt,\n                        'height': im.size[1], \n                        'width': im.size[0]}\n        out['images'].append(image_info)\n        # Load labels\n        if os.path.isfile(os.path.join(\"datasets\", label_path)):\n            labels0 = np.loadtxt(os.path.join(\"datasets\", label_path), dtype=np.float32).reshape(-1, 6)\n            # Normalized xywh to pixel xyxy format\n            labels = labels0.copy()\n            labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2)\n            labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2)\n            labels[:, 4] = image_info['width'] * labels0[:, 4]\n            labels[:, 5] = image_info['height'] * labels0[:, 5]\n        else:\n            labels = np.array([])\n        for i in range(len(labels)):\n            ann_cnt += 1\n            fbox = labels[i, 2:6].tolist()\n            ann = {'id': ann_cnt,\n                    'category_id': 1,\n                    'image_id': image_cnt,\n                    'track_id': -1,\n                    'bbox': fbox,\n                    'area': fbox[2] * fbox[3],\n                    'iscrowd': 0}\n            out['annotations'].append(ann)\n    print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations'])))\n    json.dump(out, open(out_path, 'w'))\n"
  },
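  {
    "path": "docs/snippets/xywh_norm_to_coco_sketch.py",
    "content": "# Hypothetical worked example, not a file from the original repository. It\n# isolates the label conversion used by convert_cityperson_to_coco.py above\n# (and by the identical logic in convert_ethz_to_coco.py below): normalized\n# (cx, cy, w, h) -> pixel COCO (x1, y1, w, h). The frame size is an assumption.\nimport numpy as np\n\nW, H = 2048, 1024  # assumed Cityscapes frame size, for illustration only\n# one row in labels_with_ids format: class, track_id, cx, cy, w, h (normalized)\nrow = np.array([[0, 7, 0.5, 0.5, 0.1, 0.2]], dtype=np.float32)\n\nx1 = W * (row[:, 2] - row[:, 4] / 2)  # left edge in pixels\ny1 = H * (row[:, 3] - row[:, 5] / 2)  # top edge in pixels\nw = W * row[:, 4]\nh = H * row[:, 5]\nprint(x1, y1, w, h)  # -> approx [921.6] [409.6] [204.8] [204.8]\n"
  },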
  {
    "path": "tools/convert_crowdhuman_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/crowdhuman/'\nOUT_PATH = DATA_PATH + 'annotations/'\nSPLITS = ['val', 'train']\nDEBUG = False\n\ndef load_func(fpath):\n    print('fpath', fpath)\n    assert os.path.exists(fpath)\n    with open(fpath,'r') as fid:\n        lines = fid.readlines()\n    records =[json.loads(line.strip('\\n')) for line in lines]\n    return records\n\nif __name__ == '__main__':\n    if not os.path.exists(OUT_PATH):\n        os.mkdir(OUT_PATH)\n    for split in SPLITS:\n        data_path = DATA_PATH + split\n        out_path = OUT_PATH + '{}.json'.format(split)\n        out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}\n        ann_path = DATA_PATH + 'annotation_{}.odgt'.format(split)\n        anns_data = load_func(ann_path)\n        image_cnt = 0\n        ann_cnt = 0\n        video_cnt = 0\n        for ann_data in anns_data:\n            image_cnt += 1\n            file_path = DATA_PATH + 'CrowdHuman_{}/Images/'.format(split) + '{}.jpg'.format(ann_data['ID'])\n            im = Image.open(file_path)\n            image_info = {'file_name': '{}.jpg'.format(ann_data['ID']), \n                          'id': image_cnt,\n                          'height': im.size[1], \n                          'width': im.size[0]}\n            out['images'].append(image_info)\n            if split != 'test':\n                anns = ann_data['gtboxes']\n                for i in range(len(anns)):\n                    ann_cnt += 1\n                    fbox = anns[i]['fbox']\n                    ann = {'id': ann_cnt,\n                         'category_id': 1,\n                         'image_id': image_cnt,\n                         'track_id': -1,\n                         'bbox_vis': anns[i]['vbox'],\n                         'bbox': fbox,\n                         'area': fbox[2] * fbox[3],\n                         'iscrowd': 1 if 'extra' in anns[i] and \\\n                                         'ignore' in anns[i]['extra'] and \\\n                                         anns[i]['extra']['ignore'] == 1 else 0}\n                    out['annotations'].append(ann)\n        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))\n        json.dump(out, open(out_path, 'w'))"
  },
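  {
    "path": "docs/snippets/check_coco_json_sketch.py",
    "content": "# Hypothetical sanity check, not a file from the original repository. After\n# running any of the convert_*_to_coco.py tools, the output can be loaded with\n# pycocotools to confirm it is a well-formed COCO annotation file. Assumes\n# pycocotools is installed (it is not pinned in requirements.txt).\nfrom pycocotools.coco import COCO\n\ncoco = COCO('datasets/crowdhuman/annotations/train.json')\nimg_ids = coco.getImgIds()\nprint(len(img_ids), 'images;', len(coco.getAnnIds()), 'annotations')\n# inspect the annotations attached to the first image\nfirst_anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[:1]))\nprint(first_anns[:1])\n"
  },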
  {
    "path": "tools/convert_dancetrack_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/dancetrack'\nOUT_PATH = os.path.join(DATA_PATH, 'annotations')\nSPLITS = ['train','test']  # --> split training data to train_half and val_half.\nHALF_VIDEO = True\nCREATE_SPLITTED_ANN = True\nCREATE_SPLITTED_DET = True\n\n\nif __name__ == '__main__':\n\n    if not os.path.exists(OUT_PATH):\n        os.makedirs(OUT_PATH)\n\n    for split in SPLITS:\n        if split == \"test\":\n            data_path = os.path.join(DATA_PATH, 'test')\n        else:\n            data_path = os.path.join(DATA_PATH, 'train')\n        out_path = os.path.join(OUT_PATH, '{}.json'.format(split))\n        out = {'images': [], 'annotations': [], 'videos': [],\n               'categories': [{'id': 1, 'name': 'pedestrian'}]}\n        seqs = os.listdir(data_path)\n        image_cnt = 0\n        ann_cnt = 0\n        video_cnt = 0\n        tid_curr = 0\n        tid_last = -1\n        for seq in sorted(seqs):\n            if '.DS_Store' in seq:\n                continue\n            if 'mot' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)):\n                continue\n            video_cnt += 1  # video sequence number.\n            out['videos'].append({'id': video_cnt, 'file_name': seq})\n            seq_path = os.path.join(data_path, seq)\n            img_path = os.path.join(seq_path, 'img1')\n            ann_path = os.path.join(seq_path, 'gt/gt.txt')\n            images = os.listdir(img_path)\n            num_images = len([image for image in images if 'jpg' in image])  # half and half\n\n            if HALF_VIDEO and ('half' in split):\n                image_range = [0, num_images // 2] if 'train' in split else \\\n                              [num_images // 2 + 1, num_images - 1]\n            else:\n                image_range = [0, num_images - 1]\n\n            for i in range(num_images):\n                if i < image_range[0] or i > image_range[1]:\n                    continue\n                img = cv2.imread(os.path.join(data_path, '{}/img1/{:08d}.jpg'.format(seq, i + 1)))\n                height, width = img.shape[:2]\n                image_info = {'file_name': '{}/img1/{:08d}.jpg'.format(seq, i + 1),  # image name.\n                              'id': image_cnt + i + 1,  # image number in the entire training set.\n                              'frame_id': i + 1 - image_range[0],  # image number in the video sequence, starting from 1.\n                              'prev_image_id': image_cnt + i if i > 0 else -1,  # image number in the entire training set.\n                              'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,\n                              'video_id': video_cnt,\n                              'height': height, 'width': width}\n                out['images'].append(image_info)\n            print('{}: {} images'.format(seq, num_images))\n            if split != 'test':\n                det_path = os.path.join(seq_path, 'det/det.txt')\n                anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')\n                sorted_index=np.argsort(anns[:,1])\n                anns=anns[sorted_index]\n                if ('half' in split):\n                    dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')\n                if CREATE_SPLITTED_ANN and ('half' in split):\n                    anns_out = np.array([anns[i] for i in range(anns.shape[0])\n                                         if int(anns[i][0]) - 1 >= 
image_range[0] and\n                                         int(anns[i][0]) - 1 <= image_range[1]], np.float32) \n                    anns_out[:, 0] -= image_range[0]\n                    gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split))\n                    fout = open(gt_out, 'w')\n                    for o in anns_out:\n                        fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]),\n                                    int(o[6]), int(o[7]), o[8]))\n                    fout.close()\n                if CREATE_SPLITTED_DET and ('half' in split):\n                    dets_out = np.array([dets[i] for i in range(dets.shape[0])\n                                         if int(dets[i][0]) - 1 >= image_range[0] and\n                                         int(dets[i][0]) - 1 <= image_range[1]], np.float32)\n                    dets_out[:, 0] -= image_range[0]\n                    det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split))\n                    dout = open(det_out, 'w')\n                    for o in dets_out:\n                        dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]),\n                                    float(o[6])))\n                    dout.close()\n\n                print('{} ann images'.format(int(anns[:, 0].max())))\n                for i in range(anns.shape[0]):\n                    frame_id = int(anns[i][0])\n                    if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]:\n                        continue\n                    track_id = int(anns[i][1])\n                    # cat_id = int(anns[i][7])\n                    ann_cnt += 1\n                    category_id = 1  # pedestrian(non-static)\n                    if not track_id == tid_last:\n                        tid_curr += 1\n                        tid_last = track_id\n                    ann = {'id': ann_cnt,\n                           'category_id': category_id,\n                           'image_id': image_cnt + frame_id,\n                           'track_id': tid_curr,\n                           'bbox': anns[i][2:6].tolist(),\n                           'conf': 1,\n                           'iscrowd': 0,\n                           'area': float(anns[i][4] * anns[i][5])}\n                    out['annotations'].append(ann)\n            image_cnt += num_images\n            print(tid_curr, tid_last)\n        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))\n        json.dump(out, open(out_path, 'w'))"
  },
  {
    "path": "tools/convert_ethz_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/ETHZ/'\nDATA_FILE_PATH = 'datasets/data_path/eth.train'\nOUT_PATH = DATA_PATH + 'annotations/'\n\ndef load_paths(data_path):\n    with open(data_path, 'r') as file:\n        img_files = file.readlines()\n        img_files = [x.replace('\\n', '') for x in img_files]\n        img_files = list(filter(lambda x: len(x) > 0, img_files))\n    label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files]\n    return img_files, label_files                    \n\nif __name__ == '__main__':\n    if not os.path.exists(OUT_PATH):\n        os.mkdir(OUT_PATH)\n\n    out_path = OUT_PATH + 'train.json'\n    out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}\n    img_paths, label_paths = load_paths(DATA_FILE_PATH)\n    image_cnt = 0\n    ann_cnt = 0\n    video_cnt = 0\n    for img_path, label_path in zip(img_paths, label_paths):\n        image_cnt += 1\n        im = Image.open(os.path.join(\"datasets\", img_path))\n        image_info = {'file_name': img_path, \n                        'id': image_cnt,\n                        'height': im.size[1], \n                        'width': im.size[0]}\n        out['images'].append(image_info)\n        # Load labels\n        if os.path.isfile(os.path.join(\"datasets\", label_path)):\n            labels0 = np.loadtxt(os.path.join(\"datasets\", label_path), dtype=np.float32).reshape(-1, 6)\n            # Normalized xywh to pixel xyxy format\n            labels = labels0.copy()\n            labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2)\n            labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2)\n            labels[:, 4] = image_info['width'] * labels0[:, 4]\n            labels[:, 5] = image_info['height'] * labels0[:, 5]\n        else:\n            labels = np.array([])\n        for i in range(len(labels)):\n            ann_cnt += 1\n            fbox = labels[i, 2:6].tolist()\n            ann = {'id': ann_cnt,\n                    'category_id': 1,\n                    'image_id': image_cnt,\n                    'track_id': -1,\n                    'bbox': fbox,\n                    'area': fbox[2] * fbox[3],\n                    'iscrowd': 0}\n            out['annotations'].append(ann)\n    print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations'])))\n    json.dump(out, open(out_path, 'w'))\n"
  },
  {
    "path": "tools/convert_kitti_to_coco.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\nimport json\nimport numpy as np\nimport os\nimport cv2\nDATA_PATH = 'datasets/KITTI/'\nOUT_PATH = 'datasets/KITTI/annotations'\nSPLITS = ['train']\nVIDEO_SETS = {'train': range(21), 'test': range(29), \n  'train_half': range(21), 'val_half': range(21)}\nCREATE_HALF_LABEL = True\nDEBUG = False\n\n'''\n#Values    Name      Description\n----------------------------------------------------------------------------\n   1    frame        Frame within the sequence where the object appearers\n   1    track id     Unique tracking id of this object within this sequence\n   1    type         Describes the type of object: 'Car', 'Van', 'Truck',\n                     'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',\n                     'Misc' or 'DontCare'\n   1    truncated    Integer (0,1,2) indicating the level of truncation.\n                     Note that this is in contrast to the object detection\n                     benchmark where truncation is a float in [0,1].\n   1    occluded     Integer (0,1,2,3) indicating occlusion state:\n                     0 = fully visible, 1 = partly occluded\n                     2 = largely occluded, 3 = unknown\n   1    alpha        Observation angle of object, ranging [-pi..pi]\n   4    bbox         2D bounding box of object in the image (0-based index):\n                     contains left, top, right, bottom pixel coordinates\n   3    dimensions   3D object dimensions: height, width, length (in meters)\n   3    location     3D object location x,y,z in camera coordinates (in meters)\n   1    rotation_y   Rotation ry around Y-axis in camera coordinates [-pi..pi]\n   1    score        Only for results: Float, indicating confidence in\n                     detection, needed for p/r curves, higher is better.\n'''\n\ndef project_to_image(pts_3d, P):\n  # pts_3d: n x 3\n  # P: 3 x 4\n  # return: n x 2\n  pts_3d_homo = np.concatenate(\n    [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)\n  pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)\n  pts_2d = pts_2d[:, :2] / pts_2d[:, 2:]\n  return pts_2d\n\ndef read_clib(calib_path):\n  f = open(calib_path, 'r')\n  for i, line in enumerate(f):\n    if i == 2:\n      calib = np.array(line.strip().split(' ')[1:], dtype=np.float32)\n      calib = calib.reshape(3, 4)\n      return calib\n\ndef _bbox_to_coco_bbox(bbox):\n  return [(bbox[0]), (bbox[1]),\n          (bbox[2] - bbox[0]), (bbox[3] - bbox[1])]\n\ncats = ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck',  'Person_sitting',\n        'Tram', 'Misc', 'DontCare']\n\n\ncat_ids = {cat: i + 1 for i, cat in enumerate(cats)}\ncat_ids['Person'] = cat_ids['Person_sitting']\n\ncat_info = []\nfor i, cat in enumerate(['pedestrian', 'car']):\n  cat_info.append({'name': cat, 'id': i + 1})\n\nif __name__ == '__main__':\n  if not os.path.exists(OUT_PATH):\n    os.mkdir(OUT_PATH)\n\n  for split in SPLITS:\n    ann_dir = DATA_PATH + '/label_02/'\n    ret = {'images': [], 'annotations': [], \"categories\": cat_info,\n           'videos': []}\n    num_images = 0\n    for i in VIDEO_SETS[split]:\n      image_id_base = num_images\n      video_name = '{:04d}'.format(i)\n      ret['videos'].append({'id': i + 1, 'file_name': video_name})\n      ann_dir = 'train'  if not ('test' in split) else split\n      video_path = DATA_PATH + \\\n        '{}ing/image_02/{}'.format(ann_dir, video_name)\n      #calib_path = DATA_PATH + 
'data_tracking_calib/{}ing/calib/'.format(ann_dir) \\\n      #  + '{}.txt'.format(video_name)\n      #calib = read_clib(calib_path)\n      image_files = sorted(os.listdir(video_path))\n      num_images_video = len(image_files)\n      if CREATE_HALF_LABEL and 'half' in split:\n        image_range = [0, num_images_video // 2 - 1] if split == 'train_half' else \\\n          [num_images_video // 2, num_images_video - 1]\n      else:\n        image_range = [0, num_images_video - 1]\n      print('num_frames', video_name, image_range[1] - image_range[0] + 1)\n      for j, image_name in enumerate(image_files):\n        if (j < image_range[0] or j > image_range[1]):\n          continue\n        num_images += 1\n        filen_name='training/image_02/{}/{:06d}.png'.format(video_name, j)\n        data_path = os.path.join(DATA_PATH,filen_name)\n        img = cv2.imread(data_path)\n        height, width = img.shape[:2]\n        image_info = {'file_name': filen_name,\n                      'id': num_images,\n                      #'calib': calib.tolist(),\n                      'video_id': i + 1,\n                      'frame_id': j + 1 - image_range[0],\n                      'prev_image_id': num_images-1,  \n                      'next_image_id': num_images+1,\n                      'height': height, 'width': width}\n        ret['images'].append(image_info)\n\n      if split == 'test':\n        continue\n      # 0 -1 DontCare -1 -1 -10.000000 219.310000 188.490000 245.500000 218.560000 -1000.000000 -1000.000000 -1000.000000 -10.000000 -1.000000 -1.000000 -1.000000\n      ann_path = DATA_PATH + 'training/label_02/{}.txt'.format(video_name)\n      anns = open(ann_path, 'r')\n      if CREATE_HALF_LABEL and 'half' in split:\n        label_out_folder = DATA_PATH + 'label_02_{}/'.format(split)\n        label_out_path = label_out_folder + '{}.txt'.format(video_name)\n        if not os.path.exists(label_out_folder):\n          os.mkdir(label_out_folder)\n        label_out_file = open(label_out_path, 'w')\n      \n      for ann_ind, txt in enumerate(anns):\n        tmp = txt[:-1].split(' ')\n        frame_id = int(tmp[0])\n        track_id = int(tmp[1])\n        cat_id = cat_ids[tmp[2]]\n        # fillter person and car\n        if cat_id not in [1,2]:\n            continue\n        truncated = int(float(tmp[3]))\n        occluded = int(tmp[4])\n        alpha = float(tmp[5])\n        bbox = [float(tmp[6]), float(tmp[7]), float(tmp[8]), float(tmp[9])]\n        dim = [float(tmp[10]), float(tmp[11]), float(tmp[12])]\n        location = [float(tmp[13]), float(tmp[14]), float(tmp[15])]\n        rotation_y = float(tmp[16])\n        #amodel_center = project_to_image(\n        #  np.array([location[0], location[1] - dim[0] / 2, location[2]], \n        #    np.float32).reshape(1, 3), calib)[0].tolist()  \n        ann = {'image_id': frame_id + 1 - image_range[0] + image_id_base,\n               'id': int(len(ret['annotations']) + 1),\n               'category_id': cat_id,\n               'dim': dim,\n               'bbox': _bbox_to_coco_bbox(bbox),\n               'depth': location[2],\n               'alpha': alpha,\n               'truncated': truncated,\n               'occluded': occluded,\n               'location': location,\n               'rotation_y': rotation_y,\n               'iscrowd':0,\n               'area': (bbox[2] - bbox[0])*(bbox[3] - bbox[1]),\n               'conf':1.0,\n               #'amodel_center': amodel_center,\n               'track_id': track_id + 1,\n               'box_id': int(track_id + 
1)}\n        if CREATE_HALF_LABEL and 'half' in split:\n          if (frame_id < image_range[0] or frame_id > image_range[1]):\n            continue\n          out_frame_id = frame_id - image_range[0]\n          label_out_file.write('{} {}'.format(\n            out_frame_id, txt[txt.find(' ') + 1:]))\n        \n        ret['annotations'].append(ann)\n      \n    print(\"# images: \", len(ret['images']))\n    print(\"# annotations: \", len(ret['annotations']))\n\n    out_path = '{}/{}.json'.format(\n      OUT_PATH, split)\n    json.dump(ret, open(out_path, 'w'))"
  },
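  {
    "path": "docs/snippets/project_to_image_sketch.py",
    "content": "# Hypothetical worked example, not a file from the original repository. It\n# demonstrates the homogeneous projection implemented by project_to_image in\n# tools/convert_kitti_to_coco.py: a camera point on the optical axis lands at\n# the principal point. The intrinsics below are rough KITTI-like values chosen\n# for illustration, not read from any calibration file.\nimport numpy as np\n\ndef project_to_image(pts_3d, P):\n    # same math as the converter: append 1s, apply P, divide by depth\n    pts_3d_homo = np.concatenate(\n        [pts_3d, np.ones((pts_3d.shape[0], 1), dtype=np.float32)], axis=1)\n    pts_2d = np.dot(P, pts_3d_homo.transpose(1, 0)).transpose(1, 0)\n    return pts_2d[:, :2] / pts_2d[:, 2:]\n\nf, cx, cy = 721.5, 609.6, 172.9  # assumed focal length and principal point\nP = np.array([[f, 0, cx, 0], [0, f, cy, 0], [0, 0, 1, 0]], dtype=np.float32)\npoint = np.array([[0.0, 0.0, 10.0]], dtype=np.float32)  # 10 m straight ahead\nprint(project_to_image(point, P))  # -> approx [[609.6, 172.9]]\n"
  },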
  {
    "path": "tools/convert_mot17_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/mot'\nOUT_PATH = os.path.join(DATA_PATH, 'annotations')\nSPLITS = ['val_half','train_half',\"train\",\"test\"]  # --> split training data to train_half and val_half.\nHALF_VIDEO = True\nCREATE_SPLITTED_ANN = True\nCREATE_SPLITTED_DET = True\n\n\nif __name__ == '__main__':\n\n    if not os.path.exists(OUT_PATH):\n        os.makedirs(OUT_PATH)\n\n    for split in SPLITS:\n        if split == \"test\":\n            data_path = os.path.join(DATA_PATH, 'test')\n        else:\n            data_path = os.path.join(DATA_PATH, 'train')\n        out_path = os.path.join(OUT_PATH, '{}.json'.format(split))\n        out = {'images': [], 'annotations': [], 'videos': [],\n               'categories': [{'id': 1, 'name': 'pedestrian'}]}\n        seqs = os.listdir(data_path)\n        image_cnt = 0\n        ann_cnt = 0\n        video_cnt = 0\n        tid_curr = 0\n        tid_last = -1\n        for seq in sorted(seqs):\n            if '.DS_Store' in seq:\n                continue\n            if 'mot' in DATA_PATH and (split != 'test' and not ('FRCNN' in seq)):\n                continue\n            video_cnt += 1  # video sequence number.\n            out['videos'].append({'id': video_cnt, 'file_name': seq})\n            seq_path = os.path.join(data_path, seq)\n            img_path = os.path.join(seq_path, 'img1')\n            ann_path = os.path.join(seq_path, 'gt/gt.txt')\n            images = os.listdir(img_path)\n            num_images = len([image for image in images if 'jpg' in image])  # half and half\n\n            if HALF_VIDEO and ('half' in split):\n                image_range = [0, num_images // 2] if 'train' in split else \\\n                              [num_images // 2 + 1, num_images - 1]\n            else:\n                image_range = [0, num_images - 1]\n\n            for i in range(num_images):\n                if i < image_range[0] or i > image_range[1]:\n                    continue\n                img = cv2.imread(os.path.join(data_path, '{}/img1/{:06d}.jpg'.format(seq, i + 1)))\n                height, width = img.shape[:2]\n                image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1),  # image name.\n                              'id': image_cnt + i + 1,  # image number in the entire training set.\n                              'frame_id': i + 1 - image_range[0],  # image number in the video sequence, starting from 1.\n                              'prev_image_id': image_cnt + i if i > 0 else -1,  # image number in the entire training set.\n                              'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,\n                              'video_id': video_cnt,\n                              'height': height, 'width': width}\n                out['images'].append(image_info)\n            print('{}: {} images'.format(seq, num_images))\n            if split != 'test':\n                det_path = os.path.join(seq_path, 'det/det.txt')\n                anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')\n                dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')\n                if CREATE_SPLITTED_ANN and ('half' in split):\n                    anns_out = np.array([anns[i] for i in range(anns.shape[0])\n                                         if int(anns[i][0]) - 1 >= image_range[0] and\n                                         int(anns[i][0]) - 1 <= image_range[1]], np.float32) \n   
                 anns_out[:, 0] -= image_range[0]\n                    gt_out = os.path.join(seq_path, 'gt/gt_{}.txt'.format(split))\n                    fout = open(gt_out, 'w')\n                    for o in anns_out:\n                        fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]),\n                                    int(o[6]), int(o[7]), o[8]))\n                    fout.close()\n                if CREATE_SPLITTED_DET and ('half' in split):\n                    dets_out = np.array([dets[i] for i in range(dets.shape[0])\n                                         if int(dets[i][0]) - 1 >= image_range[0] and\n                                         int(dets[i][0]) - 1 <= image_range[1]], np.float32)\n                    dets_out[:, 0] -= image_range[0]\n                    det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split))\n                    dout = open(det_out, 'w')\n                    for o in dets_out:\n                        dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]),\n                                    float(o[6])))\n                    dout.close()\n\n                print('{} ann images'.format(int(anns[:, 0].max())))\n                for i in range(anns.shape[0]):\n                    frame_id = int(anns[i][0])\n                    if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]:\n                        continue\n                    track_id = int(anns[i][1])\n                    cat_id = int(anns[i][7])\n                    ann_cnt += 1\n                    if not ('15' in DATA_PATH):\n                        #if not (float(anns[i][8]) >= 0.25):  # visibility.\n                            #continue\n                        if not (int(anns[i][6]) == 1):  # whether ignore.\n                            continue\n                        if int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]:  # Non-person\n                            continue\n                        if int(anns[i][7]) in [2, 7, 8, 12]:  # Ignored person\n                            category_id = -1\n                        else:\n                            category_id = 1  # pedestrian(non-static)\n                            if not track_id == tid_last:\n                                tid_curr += 1\n                                tid_last = track_id\n                    else:\n                        category_id = 1\n                    ann = {'id': ann_cnt,\n                           'category_id': category_id,\n                           'image_id': image_cnt + frame_id,\n                           'track_id': tid_curr,\n                           'bbox': anns[i][2:6].tolist(),\n                           'conf': float(anns[i][6]),\n                           'iscrowd': 0,\n                           'area': float(anns[i][4] * anns[i][5])}\n                    out['annotations'].append(ann)\n            image_cnt += num_images\n            print(tid_curr, tid_last)\n        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))\n        json.dump(out, open(out_path, 'w'))"
  },
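  {
    "path": "docs/snippets/half_split_sketch.py",
    "content": "# Hypothetical worked example, not a file from the original repository. It\n# isolates the train_half / val_half frame-range rule shared by the MOT17,\n# MOT20 and DanceTrack converters: train_half keeps the first half of a\n# sequence (inclusive of the middle frame index), val_half keeps the rest.\ndef half_range(split, num_images):\n    # returns [first, last] as 0-based frame offsets, both inclusive\n    if 'train' in split:\n        return [0, num_images // 2]\n    return [num_images // 2 + 1, num_images - 1]\n\nprint(half_range('train_half', 600))  # -> [0, 300]\nprint(half_range('val_half', 600))    # -> [301, 599]\n"
  },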
  {
    "path": "tools/convert_mot20_to_coco.py",
    "content": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/MOT20'\nOUT_PATH = os.path.join(DATA_PATH, 'annotations')\nSPLITS = ['train_half', 'val_half', 'train', 'test']  # --> split training data to train_half and val_half.\nHALF_VIDEO = True\nCREATE_SPLITTED_ANN = True\nCREATE_SPLITTED_DET = True\n\n\nif __name__ == '__main__':\n\n    if not os.path.exists(OUT_PATH):\n        os.makedirs(OUT_PATH)\n\n    for split in SPLITS:\n        if split == \"test\":\n            data_path = os.path.join(DATA_PATH, 'test')\n        else:\n            data_path = os.path.join(DATA_PATH, 'train')\n        out_path = os.path.join(OUT_PATH, '{}.json'.format(split))\n        out = {'images': [], 'annotations': [], 'videos': [],\n               'categories': [{'id': 1, 'name': 'pedestrian'}]}\n        seqs = os.listdir(data_path)\n        image_cnt = 0\n        ann_cnt = 0\n        video_cnt = 0\n        tid_curr = 0\n        tid_last = -1\n        for seq in sorted(seqs):\n            if '.DS_Store' in seq:\n                continue\n            video_cnt += 1  # video sequence number.\n            out['videos'].append({'id': video_cnt, 'file_name': seq})\n            seq_path = os.path.join(data_path, seq)\n            img_path = os.path.join(seq_path, 'img1')\n            ann_path = os.path.join(seq_path, 'gt/gt.txt')\n            images = os.listdir(img_path)\n            num_images = len([image for image in images if 'jpg' in image])  # half and half\n\n            if HALF_VIDEO and ('half' in split):\n                image_range = [0, num_images // 2] if 'train' in split else \\\n                              [num_images // 2 + 1, num_images - 1]\n            else:\n                image_range = [0, num_images - 1]\n\n            for i in range(num_images):\n                if i < image_range[0] or i > image_range[1]:\n                    continue\n                img = cv2.imread(os.path.join(data_path, '{}/img1/{:06d}.jpg'.format(seq, i + 1)))\n                height, width = img.shape[:2]\n                image_info = {'file_name': '{}/img1/{:06d}.jpg'.format(seq, i + 1),  # image name.\n                              'id': image_cnt + i + 1,  # image number in the entire training set.\n                              'frame_id': i + 1 - image_range[0],  # image number in the video sequence, starting from 1.\n                              'prev_image_id': image_cnt + i if i > 0 else -1,  # image number in the entire training set.\n                              'next_image_id': image_cnt + i + 2 if i < num_images - 1 else -1,\n                              'video_id': video_cnt,\n                              'height': height, 'width': width}\n                out['images'].append(image_info)\n            print('{}: {} images'.format(seq, num_images))\n            if split != 'test':\n                det_path = os.path.join(seq_path, 'det/det.txt')\n                anns = np.loadtxt(ann_path, dtype=np.float32, delimiter=',')\n                dets = np.loadtxt(det_path, dtype=np.float32, delimiter=',')\n                if CREATE_SPLITTED_ANN and ('half' in split):\n                    anns_out = np.array([anns[i] for i in range(anns.shape[0])\n                                         if int(anns[i][0]) - 1 >= image_range[0] and\n                                         int(anns[i][0]) - 1 <= image_range[1]], np.float32) \n                    anns_out[:, 0] -= image_range[0]\n                    gt_out = os.path.join(seq_path, 
'gt/gt_{}.txt'.format(split))\n                    fout = open(gt_out, 'w')\n                    for o in anns_out:\n                        fout.write('{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:d},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), int(o[2]), int(o[3]), int(o[4]), int(o[5]),\n                                    int(o[6]), int(o[7]), o[8]))\n                    fout.close()\n                if CREATE_SPLITTED_DET and ('half' in split):\n                    dets_out = np.array([dets[i] for i in range(dets.shape[0])\n                                         if int(dets[i][0]) - 1 >= image_range[0] and\n                                         int(dets[i][0]) - 1 <= image_range[1]], np.float32)\n                    dets_out[:, 0] -= image_range[0]\n                    det_out = os.path.join(seq_path, 'det/det_{}.txt'.format(split))\n                    dout = open(det_out, 'w')\n                    for o in dets_out:\n                        dout.write('{:d},{:d},{:.1f},{:.1f},{:.1f},{:.1f},{:.6f}\\n'.format(\n                                    int(o[0]), int(o[1]), float(o[2]), float(o[3]), float(o[4]), float(o[5]),\n                                    float(o[6])))\n                    dout.close()\n\n                print('{} ann images'.format(int(anns[:, 0].max())))\n                for i in range(anns.shape[0]):\n                    frame_id = int(anns[i][0])\n                    if frame_id - 1 < image_range[0] or frame_id - 1 > image_range[1]:\n                        continue\n                    track_id = int(anns[i][1])\n                    cat_id = int(anns[i][7])\n                    ann_cnt += 1\n                    if not ('15' in DATA_PATH):\n                        #if not (float(anns[i][8]) >= 0.25):  # visibility.\n                            #continue\n                        if not (int(anns[i][6]) == 1):  # whether ignore.\n                            continue\n                        if int(anns[i][7]) in [3, 4, 5, 6, 9, 10, 11]:  # Non-person\n                            continue\n                        if int(anns[i][7]) in [2, 7, 8, 12]:  # Ignored person\n                            #category_id = -1\n                            continue\n                        else:\n                            category_id = 1  # pedestrian(non-static)\n                            if not track_id == tid_last:\n                                tid_curr += 1\n                                tid_last = track_id\n                    else:\n                        category_id = 1\n                    ann = {'id': ann_cnt,\n                           'category_id': category_id,\n                           'image_id': image_cnt + frame_id,\n                           'track_id': tid_curr,\n                           'bbox': anns[i][2:6].tolist(),\n                           'conf': float(anns[i][6]),\n                           'iscrowd': 0,\n                           'area': float(anns[i][4] * anns[i][5])}\n                    out['annotations'].append(ann)\n            image_cnt += num_images\n            print(tid_curr, tid_last)\n        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))\n        json.dump(out, open(out_path, 'w'))"
  },
  {
    "path": "tools/convert_video.py",
    "content": "import cv2\n\ndef convert_video(video_path):\n    cap = cv2.VideoCapture(video_path)\n    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float\n    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float\n    fps = cap.get(cv2.CAP_PROP_FPS)\n    video_name = video_path.split('/')[-1].split('.')[0]\n    save_name = video_name + '_converted'\n    save_path = video_path.replace(video_name, save_name)\n    vid_writer = cv2.VideoWriter(\n        save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (int(width), int(height))\n    )\n    while True:\n        ret_val, frame = cap.read()\n        if ret_val:\n            vid_writer.write(frame)\n            ch = cv2.waitKey(1)\n            if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n                break\n        else:\n            break\n\nif __name__ == \"__main__\":\n    video_path = 'videos/palace.mp4'\n    convert_video(video_path)"
  },
  {
    "path": "tools/mix_data_ablation.py",
    "content": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_mot_ch/annotations\ncp mot/annotations/val_half.json mix_mot_ch/annotations/val_half.json\ncp mot/annotations/test.json mix_mot_ch/annotations/test.json\ncd mix_mot_ch\nln -s ../mot/train mot_train\nln -s ../crowdhuman/CrowdHuman_train crowdhuman_train\nln -s ../crowdhuman/CrowdHuman_val crowdhuman_val\ncd ..\n\"\"\"\n\nmot_json = json.load(open('datasets/mot/annotations/train_half.json','r'))\n\nimg_list = list()\nfor img in mot_json['images']:\n    img['file_name'] = 'mot_train/' + img['file_name']\n    img_list.append(img)\n\nann_list = list()\nfor ann in mot_json['annotations']:\n    ann_list.append(ann)\n\nvideo_list = mot_json['videos']\ncategory_list = mot_json['categories']\n\nprint('mot17')\n\nmax_img = 10000\nmax_ann = 2000000\nmax_video = 10\n\ncrowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_train/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_train'\n})\n\nprint('crowdhuman_train')\n\nmax_img = 30000\nmax_ann = 10000000\n\ncrowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_val_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_val/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_val_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_val'\n})\n\nprint('crowdhuman_val')\n\nmix_json = dict()\nmix_json['images'] = img_list\nmix_json['annotations'] = ann_list\nmix_json['videos'] = video_list\nmix_json['categories'] = category_list\njson.dump(mix_json, open('datasets/mix_mot_ch/annotations/train.json','w'))"
  },
  {
    "path": "tools/mix_data_bdd100k.py",
    "content": "import json\nimport os\nimport numpy as np\n\n\"\"\"\ncd datasets\nmkdir -p mix_det/annotations\ncp mot/annotations/val_half.json mix_det/annotations/val_half.json\ncp mot/annotations/test.json mix_det/annotations/test.json\ncd mix_det\nln -s ../mot/train mot_train\nln -s ../crowdhuman/CrowdHuman_train crowdhuman_train\nln -s ../crowdhuman/CrowdHuman_val crowdhuman_val\nln -s ../Cityscapes cp_train\nln -s ../ETHZ ethz_train\ncd ..\n\"\"\"\n\nbdd100ktrain_json = json.load(open('datasets/bdd100k/annotations/mix_train_val.json','r'))\n# need_index=np.random.choice(range(len(bdd100ktrain_json['images'])),len(bdd100ktrain_json['images'])//3,replace=False)\n# need_img_ids={}\nimg_list = list()\nfor img in bdd100ktrain_json['images']:\n    img['is_video']=1\n    img_list.append(img)\n    # need_img_ids[bdd100ktrain_json['images'][img_idx]['id']]=1\n\nann_list = list()\nfor ann in bdd100ktrain_json['annotations']:\n    # if ann['image_id'] in need_img_ids:\n    ann_list.append(ann)\n\nvideo_list = bdd100ktrain_json['videos']\ncategory_list = bdd100ktrain_json['categories']\n\n\nprint('bdd100ktrain')\n\nmax_img = len(img_list)\nmax_ann = len(ann_list)\nmax_video = len(video_list)\n\nbdd100kval_json = json.load(open('datasets/bdd100k/annotations/val.json','r'))\nfor img in bdd100kval_json['images']:\n    img['prev_image_id'] = img['prev_image_id'] + max_img\n    img['next_image_id'] = img['next_image_id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id']+= max_video\n    img['is_video']=1\n    img_list.append(img)\n    \nfor ann in bdd100kval_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nfor vid in bdd100kval_json['videos']:\n    vid['id']+=max_video\n    video_list.append(vid)\n\nprint('bdd100ktest')\n\nmix_json = dict()\nmix_json['images'] = img_list\nmix_json['annotations'] = ann_list\nmix_json['videos'] = video_list\nmix_json['categories'] = category_list\njson.dump(mix_json, open('datasets/bdd100k/annotations/mix_train_val.json','w'))\n"
  },
  {
    "path": "tools/mix_data_test_mot17.py",
    "content": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_det/annotations\ncp mot/annotations/val_half.json mix_det/annotations/val_half.json\ncp mot/annotations/test.json mix_det/annotations/test.json\ncd mix_det\nln -s ../mot/train mot_train\nln -s ../crowdhuman/CrowdHuman_train crowdhuman_train\nln -s ../crowdhuman/CrowdHuman_val crowdhuman_val\nln -s ../Cityscapes cp_train\nln -s ../ETHZ ethz_train\ncd ..\n\"\"\"\n\nmot_json = json.load(open('datasets/mot/annotations/train.json','r'))\n\nimg_list = list()\nfor img in mot_json['images']:\n    img['file_name'] = 'mot_train/' + img['file_name']\n    img_list.append(img)\n\nann_list = list()\nfor ann in mot_json['annotations']:\n    ann_list.append(ann)\n\nvideo_list = mot_json['videos']\ncategory_list = mot_json['categories']\n\n\nprint('mot17')\n\nmax_img = 10000\nmax_ann = 2000000\nmax_video = 10\n\ncrowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_train/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nprint('crowdhuman_train')\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_train'\n})\n\n\nmax_img = 30000\nmax_ann = 10000000\n\ncrowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_val_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_val/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_val_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nprint('crowdhuman_val')\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_val'\n})\n\nmax_img = 40000\nmax_ann = 20000000\n\nethz_json = json.load(open('datasets/ETHZ/annotations/train.json','r'))\nimg_id_count = 0\nfor img in ethz_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'ethz_train/' + img['file_name'][5:]\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in ethz_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nprint('ETHZ')\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'ethz'\n})\n\nmax_img = 50000\nmax_ann = 25000000\n\ncp_json = json.load(open('datasets/Cityscapes/annotations/train.json','r'))\nimg_id_count = 0\nfor img in cp_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'cp_train/' + img['file_name'][11:]\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + 
max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in cp_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nprint('Cityscapes')\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'cityperson'\n})\n\nmix_json = dict()\nmix_json['images'] = img_list\nmix_json['annotations'] = ann_list\nmix_json['videos'] = video_list\nmix_json['categories'] = category_list\njson.dump(mix_json, open('datasets/mix_det/annotations/train.json','w'))\n"
  },
  {
    "path": "tools/mix_data_test_mot20.py",
    "content": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_mot20_ch/annotations\ncp MOT20/annotations/val_half.json mix_mot20_ch/annotations/val_half.json\ncp MOT20/annotations/test.json mix_mot20_ch/annotations/test.json\ncd mix_mot20_ch\nln -s ../MOT20/train mot20_train\nln -s ../crowdhuman/CrowdHuman_train crowdhuman_train\nln -s ../crowdhuman/CrowdHuman_val crowdhuman_val\ncd ..\n\"\"\"\n\nmot_json = json.load(open('datasets/MOT20/annotations/train.json','r'))\n\nimg_list = list()\nfor img in mot_json['images']:\n    img['file_name'] = 'mot20_train/' + img['file_name']\n    img_list.append(img)\n\nann_list = list()\nfor ann in mot_json['annotations']:\n    ann_list.append(ann)\n\nvideo_list = mot_json['videos']\ncategory_list = mot_json['categories']\n\n\nmax_img = 10000\nmax_ann = 2000000\nmax_video = 10\n\ncrowdhuman_json = json.load(open('datasets/crowdhuman/annotations/train.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_train/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_train'\n})\n\n\nmax_img = 30000\nmax_ann = 10000000\n\ncrowdhuman_val_json = json.load(open('datasets/crowdhuman/annotations/val.json','r'))\nimg_id_count = 0\nfor img in crowdhuman_val_json['images']:\n    img_id_count += 1\n    img['file_name'] = 'crowdhuman_val/Images/' + img['file_name']\n    img['frame_id'] = img_id_count\n    img['prev_image_id'] = img['id'] + max_img\n    img['next_image_id'] = img['id'] + max_img\n    img['id'] = img['id'] + max_img\n    img['video_id'] = max_video\n    img_list.append(img)\n    \nfor ann in crowdhuman_val_json['annotations']:\n    ann['id'] = ann['id'] + max_ann\n    ann['image_id'] = ann['image_id'] + max_img\n    ann_list.append(ann)\n\nvideo_list.append({\n    'id': max_video,\n    'file_name': 'crowdhuman_val'\n})\n\nmix_json = dict()\nmix_json['images'] = img_list\nmix_json['annotations'] = ann_list\nmix_json['videos'] = video_list\nmix_json['categories'] = category_list\njson.dump(mix_json, open('datasets/mix_mot20_ch/annotations/train.json','w'))"
  },
  {
    "path": "tools/mota.py",
    "content": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nimport sys\nimport os\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n    \nfrom yolox.core import launch\nfrom yolox.exp import get_exp\nfrom yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger\n\nimport argparse\nimport os\nimport random\nimport warnings\nimport glob\nimport motmetrics as mm\nfrom collections import OrderedDict\nfrom pathlib import Path\n\n\ndef compare_dataframes(gts, ts):\n    accs = []\n    names = []\n    for k, tsacc in ts.items():\n        if k in gts:            \n            logger.info('Comparing {}...'.format(k))\n            accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))\n            names.append(k)\n        else:\n            logger.warning('No ground truth for {}, skipping.'.format(k))\n\n    return accs, names\n\n\n# evaluate MOTA\n\nresults_folder = 'DiffusionTrack_outputs/yolox_x_diffusion_track_mot17_ablation/track_results_mot17_ablation_1_500'\nmm.lap.default_solver = 'lap'\n\ngt_type = '_val_half'\n#gt_type = ''\nprint('gt_type', gt_type)\ngtfiles = glob.glob(\n    os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type)))\nprint('gt_files', gtfiles)\ntsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')]\n\nlogger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))\nlogger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))\nlogger.info('Default LAP solver \\'{}\\''.format(mm.lap.default_solver))\nlogger.info('Loading files.')\n\ngt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])\nts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1.0)) for f in tsfiles])    \n\nmh = mm.metrics.create()    \naccs, names = compare_dataframes(gt, ts)\n\nlogger.info('Running metrics')\nmetrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',\n            'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',\n            'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']\nsummary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\n# summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)\n# print(mm.io.render_summary(\n#   summary, formatters=mh.formatters, \n#   namemap=mm.io.motchallenge_metric_names))\ndiv_dict = {\n    'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],\n    'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}\nfor divisor in div_dict:\n    for divided in div_dict[divisor]:\n        summary[divided] = (summary[divided] / summary[divisor])\nfmt = mh.formatters\nchange_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked',\n                    'partially_tracked', 'mostly_lost']\nfor k in change_fmt_list:\n    fmt[k] = fmt['mota']\nprint(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))\n\nmetrics = mm.metrics.motchallenge_metrics + ['num_objects']\nsummary = 
mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\nprint(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))\nlogger.info('Completed')\n"
  },
  {
    "path": "tools/track.py",
    "content": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport sys\nimport os\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n    \nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom yolox.core import launch\nfrom yolox.exp import get_exp\nfrom yolox.utils import configure_nccl, fuse_model, get_local_rank, get_model_info, setup_logger\nfrom yolox.evaluators import DiffusionMOTEvaluatorKL\n\nimport argparse\nimport os\nimport random\nimport warnings\nimport glob\nimport motmetrics as mm\nfrom collections import OrderedDict\nfrom pathlib import Path\n\n\ndef make_parser():\n    parser = argparse.ArgumentParser(\"YOLOX Eval\")\n    parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n    parser.add_argument(\"-n\", \"--name\", type=str, default=None, help=\"model name\")\n\n    # distributed\n    parser.add_argument(\n        \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=None,\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\"-b\", \"--batch-size\", type=int, default=1, help=\"batch size\")\n    parser.add_argument(\n        \"-d\", \"--devices\", default=1, type=int, help=\"device for training\"\n    )\n    parser.add_argument(\n        \"--local_rank\", default=0, type=int, help=\"local rank for dist training\"\n    )\n    parser.add_argument(\n        \"--num_machines\", default=1, type=int, help=\"num of node for training\"\n    )\n    parser.add_argument(\n        \"--machine_rank\", default=0, type=int, help=\"node rank for multi-node training\"\n    )\n    parser.add_argument(\n        \"-f\",\n        \"--exp_file\",\n        default=\"exps/example/mot/yolox_x_diffusion_track_dancetrack.py\",\n        type=str,\n        help=\"pls input your expriment description file\",\n    )\n    parser.add_argument(\n        \"--fp16\",\n        dest=\"fp16\",\n        default=False,\n        action=\"store_true\",\n        help=\"Adopting mix precision evaluating.\",\n    )\n    parser.add_argument(\n        \"--fuse\",\n        dest=\"fuse\",\n        default=True,\n        action=\"store_true\",\n        help=\"Fuse conv and bn for testing.\",\n    )\n    parser.add_argument(\n        \"--trt\",\n        dest=\"trt\",\n        default=False,\n        action=\"store_true\",\n        help=\"Using TensorRT model for testing.\",\n    )\n    parser.add_argument(\n        \"--test\",\n        dest=\"test\",\n        default=False,\n        action=\"store_true\",\n        help=\"Evaluating on test-dev set.\",\n    )\n    parser.add_argument(\n        \"--speed\",\n        dest=\"speed\",\n        default=False,\n        action=\"store_true\",\n        help=\"speed test only.\",\n    )\n    parser.add_argument(\n        \"opts\",\n        help=\"Modify config options using the command-line\",\n        default=None,\n        nargs=argparse.REMAINDER,\n    )\n    \n    parser.add_argument(\"-c\", \"--ckpt\", default=\"diffusiontrack_dancetrack.pth.tar\", type=str, help=\"ckpt for eval\")\n    parser.add_argument(\"--tsize\", default=None, type=int, help=\"test img size\")\n    parser.add_argument(\"--seed\", default=8823, type=int, help=\"eval seed\")\n\n    # det args\n    
parser.add_argument(\"--det_thresh\", default=0.7, type=float, help=\"detection conf\")\n    parser.add_argument(\"--nms2d\", default=0.75, type=float, help=\"detection nms threshold\")\n    # tracking args\n\n    parser.add_argument(\"--conf_thresh\", type=float, default=0.25, help=\"tracking confidence threshold\")\n    parser.add_argument(\"--nms3d\", default=0.7, type=float, help=\"association nms threshold\")\n    parser.add_argument(\"--interval\", default=5, type=int, help=\"relink interval\")\n    parser.add_argument(\"--min-box-area\", type=float, default=100, help='filter out tiny boxes')\n    parser.add_argument(\"--mot20\", dest=\"mot20\", default=False, action=\"store_true\", help=\"test mot20.\")\n    return parser\n\n\ndef compare_dataframes(gts, ts):\n    accs = []\n    names = []\n    for k, tsacc in ts.items():\n        if k in gts:            \n            logger.info('Comparing {}...'.format(k))\n            accs.append(mm.utils.compare_to_groundtruth(gts[k], tsacc, 'iou', distth=0.5))\n            names.append(k)\n        else:\n            logger.warning('No ground truth for {}, skipping.'.format(k))\n\n    return accs, names\n\n\n@logger.catch\ndef main(exp, args, num_gpu):\n    if args.seed is not None:\n        random.seed(args.seed)\n        torch.manual_seed(args.seed)\n        cudnn.deterministic = True\n        warnings.warn(\n            \"You have chosen to seed testing. This will turn on the CUDNN deterministic setting, \"\n        )\n\n    is_distributed = num_gpu > 1\n\n    # set environment variables for distributed training\n    cudnn.benchmark = True\n\n    rank = args.local_rank\n    # rank = get_local_rank()\n\n    file_name = os.path.join(exp.output_dir, args.experiment_name)\n\n    if rank == 0:\n        os.makedirs(file_name, exist_ok=True)\n\n    results_folder = os.path.join(file_name, \"track_results_mot20_test\")\n    os.makedirs(results_folder, exist_ok=True)\n\n    setup_logger(file_name, distributed_rank=rank, filename=\"val_log.txt\", mode=\"a\")\n    logger.info(\"Args: {}\".format(args))\n\n    if args.conf_thresh is not None:\n        exp.conf_thresh = args.conf_thresh\n    if args.nms2d is not None:\n        exp.nms_thresh2d = args.nms2d\n    if args.det_thresh is not None:\n        exp.det_thresh = args.det_thresh\n    if args.nms3d is not None:\n        exp.nms_thresh3d = args.nms3d\n    if args.interval is not None:\n        exp.interval=args.interval\n    if args.tsize is not None:\n        exp.test_size = (args.tsize, args.tsize)\n\n    model = exp.get_model()\n    # logger.info(\"Model Summary: {}\".format(get_model_info(model, exp.test_size)))\n    #logger.info(\"Model Structure:\\n{}\".format(str(model)))\n\n    val_loader = exp.get_eval_loader(args.batch_size, is_distributed, args.test)\n    evaluator = DiffusionMOTEvaluatorKL(\n        args=args,\n        dataloader=val_loader,\n        img_size=exp.test_size,\n        confthre=exp.conf_thresh,\n        nmsthre3d=exp.nms_thresh3d,\n        detthre=exp.det_thresh,\n        nmsthre2d=exp.nms_thresh2d,\n        interval=exp.interval,\n        num_classes=exp.num_classes,\n        )\n\n    torch.cuda.set_device(rank)\n    model.cuda(rank)\n    model.eval()\n\n    if not args.speed and not args.trt:\n        if args.ckpt is None:\n            ckpt_file = os.path.join(file_name, \"best_ckpt.pth.tar\")\n        else:\n            ckpt_file = args.ckpt\n        logger.info(\"loading checkpoint\")\n        loc = \"cuda:{}\".format(rank)\n        ckpt = torch.load(ckpt_file, 
map_location=loc)\n        # load the model state dict\n        model.load_state_dict(ckpt[\"model\"])\n        logger.info(\"loaded checkpoint done.\")\n\n    if is_distributed:\n        model = DDP(model, device_ids=[rank])\n\n    if args.fuse:\n        logger.info(\"\\tFusing model...\")\n        model = fuse_model(model)\n\n    if args.trt:\n        assert (\n            not args.fuse and not is_distributed and args.batch_size == 1\n        ), \"TensorRT model does not support model fusing or distributed inference!\"\n        trt_file = os.path.join(file_name, \"model_trt.pth\")\n        assert os.path.exists(\n            trt_file\n        ), \"TensorRT model is not found!\\n Run tools/trt.py first!\"\n        model.head.decode_in_inference = False\n        decoder = model.head.decode_outputs\n    else:\n        trt_file = None\n        decoder = None\n\n    # start evaluate\n    *_, summary = evaluator.evaluate(\n        model, is_distributed, args.fp16, trt_file, decoder, exp.test_size, results_folder\n    )\n    logger.info(\"\\n\" + summary)\n\n    # evaluate MOTA\n    mm.lap.default_solver = 'lap'\n\n    if exp.val_ann == 'val_half.json':\n        gt_type = '_val_half'\n    else:\n        gt_type = ''\n    print('gt_type', gt_type)\n    if args.mot20:\n        gtfiles = glob.glob(os.path.join('datasets/MOT20/train', '*/gt/gt{}.txt'.format(gt_type)))\n    else:\n        gtfiles = glob.glob(os.path.join('datasets/mot/train', '*/gt/gt{}.txt'.format(gt_type)))\n    print('gt_files', gtfiles)\n    tsfiles = [f for f in glob.glob(os.path.join(results_folder, '*.txt')) if not os.path.basename(f).startswith('eval')]\n\n    logger.info('Found {} groundtruths and {} test files.'.format(len(gtfiles), len(tsfiles)))\n    logger.info('Available LAP solvers {}'.format(mm.lap.available_solvers))\n    logger.info('Default LAP solver \\'{}\\''.format(mm.lap.default_solver))\n    logger.info('Loading files.')\n\n    gt = OrderedDict([(Path(f).parts[-3], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=1)) for f in gtfiles])\n    ts = OrderedDict([(os.path.splitext(Path(f).parts[-1])[0], mm.io.loadtxt(f, fmt='mot15-2D', min_confidence=-1)) for f in tsfiles])\n\n    mh = mm.metrics.create()\n    accs, names = compare_dataframes(gt, ts)\n\n    logger.info('Running metrics')\n    metrics = ['recall', 'precision', 'num_unique_objects', 'mostly_tracked',\n               'partially_tracked', 'mostly_lost', 'num_false_positives', 'num_misses',\n               'num_switches', 'num_fragmentations', 'mota', 'motp', 'num_objects']\n    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\n    # summary = mh.compute_many(accs, names=names, metrics=mm.metrics.motchallenge_metrics, generate_overall=True)\n    # print(mm.io.render_summary(\n    #   summary, formatters=mh.formatters, \n    #   namemap=mm.io.motchallenge_metric_names))\n    div_dict = {\n        'num_objects': ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations'],\n        'num_unique_objects': ['mostly_tracked', 'partially_tracked', 'mostly_lost']}\n    for divisor in div_dict:\n        for divided in div_dict[divisor]:\n            summary[divided] = (summary[divided] / summary[divisor])\n    fmt = mh.formatters\n    change_fmt_list = ['num_false_positives', 'num_misses', 'num_switches', 'num_fragmentations', 'mostly_tracked',\n                       'partially_tracked', 'mostly_lost']\n    for k in change_fmt_list:\n        fmt[k] = fmt['mota']\n    
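# the counts above were divided into ratios, so render them with the MOTA percent formatter\n    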
print(mm.io.render_summary(summary, formatters=fmt, namemap=mm.io.motchallenge_metric_names))\n\n    metrics = mm.metrics.motchallenge_metrics + ['num_objects']\n    summary = mh.compute_many(accs, names=names, metrics=metrics, generate_overall=True)\n    print(mm.io.render_summary(summary, formatters=mh.formatters, namemap=mm.io.motchallenge_metric_names))\n    logger.info('Completed')\n\n\nif __name__ == \"__main__\":\n    args = make_parser().parse_args()\n    exp = get_exp(args.exp_file, args.name)\n    exp.merge(args.opts)\n\n    if not args.experiment_name:\n        args.experiment_name = exp.exp_name\n\n    num_gpu = torch.cuda.device_count() if args.devices is None else args.devices\n    assert num_gpu <= torch.cuda.device_count()\n\n    launch(\n        main,\n        num_gpu,\n        args.num_machines,\n        args.machine_rank,\n        backend=args.dist_backend,\n        dist_url=args.dist_url,\n        args=(exp, args, num_gpu),\n    )\n"
  },
  {
    "path": "tools/train.py",
    "content": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport torch\nimport torch.backends.cudnn as cudnn\nimport os\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"2,3,4,5,6,7\"\nimport sys\nprj_path = os.path.join(os.path.dirname(__file__), '..')\n\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\nfrom yolox.core import Trainer, launch\nfrom yolox.exp import get_exp\n\nimport argparse\nimport random\nimport warnings\n\n\ndef make_parser():\n    parser = argparse.ArgumentParser(\"YOLOX train parser\")\n    parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n    parser.add_argument(\"-n\", \"--name\", type=str, default=None, help=\"model name\")\n\n    # distributed\n    parser.add_argument(\n        \"--dist-backend\", default=\"nccl\", type=str, help=\"distributed backend\"\n    )\n    parser.add_argument(\n        \"--dist-url\",\n        default=None,\n        type=str,\n        help=\"url used to set up distributed training\",\n    )\n    parser.add_argument(\"-b\", \"--batch-size\", type=int, default=2*8, help=\"batch size\")\n    parser.add_argument(\n        \"-d\", \"--devices\", default=8, type=int, help=\"device for training\"\n    )\n    parser.add_argument(\n        \"--local_rank\", default=0, type=int, help=\"local rank for dist training\"\n    )\n    parser.add_argument(\n        \"-f\",\n        \"--exp_file\",\n        default=\"exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py\",\n        type=str,\n        help=\"plz input your expriment description file\",\n    )\n    parser.add_argument(\n        \"--resume\", default=False, action=\"store_true\", help=\"resume training\"\n    )\n    parser.add_argument(\"-c\", \"--ckpt\", default=\"diffusion_dancetrack_det.pth.tar\", type=str, help=\"checkpoint file\")\n    parser.add_argument(\n        \"-e\",\n        \"--start_epoch\",\n        default=None,\n        type=int,\n        help=\"resume training start epoch\",\n    )\n    parser.add_argument(\n        \"--num_machines\", default=1, type=int, help=\"num of node for training\"\n    )\n    parser.add_argument(\n        \"--machine_rank\", default=0, type=int, help=\"node rank for multi-node training\"\n    )\n    parser.add_argument(\n        \"--fp16\", \n        dest=\"fp16\",\n        default=False,\n        action=\"store_true\",\n        help=\"Adopting mix precision training.\",\n    )\n    parser.add_argument(\n        \"-o\",\n        \"--occupy\",\n        dest=\"occupy\",\n        default=False,\n        action=\"store_true\",\n        help=\"occupy GPU memory first for training.\",\n    )\n    parser.add_argument(\n        \"opts\",\n        help=\"Modify config options using the command-line\",\n        default=None,\n        nargs=argparse.REMAINDER,\n    )\n    return parser\n\n\n@logger.catch\ndef main(exp, args):\n    if exp.seed is not None:\n        random.seed(exp.seed)\n        torch.manual_seed(exp.seed)\n        cudnn.deterministic = True\n        warnings.warn(\n            \"You have chosen to seed training. This will turn on the CUDNN deterministic setting, \"\n            \"which can slow down your training considerably! 
You may see unexpected behavior \"\n            \"when restarting from checkpoints.\"\n        )\n\n    # set environment variables for distributed training\n    cudnn.benchmark = True\n\n    trainer = Trainer(exp, args)\n    trainer.train()\n\n\nif __name__ == \"__main__\":\n    args = make_parser().parse_args()\n    # args.exp_file=f\n    # args.ckpt=c\n    exp = get_exp(args.exp_file, args.name)\n    exp.merge(args.opts)\n\n    if not args.experiment_name:\n        args.experiment_name = exp.exp_name\n\n    num_gpu = torch.cuda.device_count() if args.devices is None else args.devices\n    assert num_gpu <= torch.cuda.device_count()\n\n    launch(\n        main,\n        num_gpu,\n        args.num_machines, \n        args.machine_rank,\n        backend=args.dist_backend,\n        dist_url=args.dist_url,\n        args=(exp, args),\n    )\n"
  },
  {
    "path": "tools/txt2video.py",
    "content": "import os\nimport sys\nimport json\nimport cv2\nimport glob as gb\nimport numpy as np\n\n\ndef colormap(rgb=False):\n    color_list = np.array(\n        [\n            0.000, 0.447, 0.741,\n            0.850, 0.325, 0.098,\n            0.929, 0.694, 0.125,\n            0.494, 0.184, 0.556,\n            0.466, 0.674, 0.188,\n            0.301, 0.745, 0.933,\n            0.635, 0.078, 0.184,\n            0.300, 0.300, 0.300,\n            0.600, 0.600, 0.600,\n            1.000, 0.000, 0.000,\n            1.000, 0.500, 0.000,\n            0.749, 0.749, 0.000,\n            0.000, 1.000, 0.000,\n            0.000, 0.000, 1.000,\n            0.667, 0.000, 1.000,\n            0.333, 0.333, 0.000,\n            0.333, 0.667, 0.000,\n            0.333, 1.000, 0.000,\n            0.667, 0.333, 0.000,\n            0.667, 0.667, 0.000,\n            0.667, 1.000, 0.000,\n            1.000, 0.333, 0.000,\n            1.000, 0.667, 0.000,\n            1.000, 1.000, 0.000,\n            0.000, 0.333, 0.500,\n            0.000, 0.667, 0.500,\n            0.000, 1.000, 0.500,\n            0.333, 0.000, 0.500,\n            0.333, 0.333, 0.500,\n            0.333, 0.667, 0.500,\n            0.333, 1.000, 0.500,\n            0.667, 0.000, 0.500,\n            0.667, 0.333, 0.500,\n            0.667, 0.667, 0.500,\n            0.667, 1.000, 0.500,\n            1.000, 0.000, 0.500,\n            1.000, 0.333, 0.500,\n            1.000, 0.667, 0.500,\n            1.000, 1.000, 0.500,\n            0.000, 0.333, 1.000,\n            0.000, 0.667, 1.000,\n            0.000, 1.000, 1.000,\n            0.333, 0.000, 1.000,\n            0.333, 0.333, 1.000,\n            0.333, 0.667, 1.000,\n            0.333, 1.000, 1.000,\n            0.667, 0.000, 1.000,\n            0.667, 0.333, 1.000,\n            0.667, 0.667, 1.000,\n            0.667, 1.000, 1.000,\n            1.000, 0.000, 1.000,\n            1.000, 0.333, 1.000,\n            1.000, 0.667, 1.000,\n            0.167, 0.000, 0.000,\n            0.333, 0.000, 0.000,\n            0.500, 0.000, 0.000,\n            0.667, 0.000, 0.000,\n            0.833, 0.000, 0.000,\n            1.000, 0.000, 0.000,\n            0.000, 0.167, 0.000,\n            0.000, 0.333, 0.000,\n            0.000, 0.500, 0.000,\n            0.000, 0.667, 0.000,\n            0.000, 0.833, 0.000,\n            0.000, 1.000, 0.000,\n            0.000, 0.000, 0.167,\n            0.000, 0.000, 0.333,\n            0.000, 0.000, 0.500,\n            0.000, 0.000, 0.667,\n            0.000, 0.000, 0.833,\n            0.000, 0.000, 1.000,\n            0.000, 0.000, 0.000,\n            0.143, 0.143, 0.143,\n            0.286, 0.286, 0.286,\n            0.429, 0.429, 0.429,\n            0.571, 0.571, 0.571,\n            0.714, 0.714, 0.714,\n            0.857, 0.857, 0.857,\n            1.000, 1.000, 1.000\n        ]\n    ).astype(np.float32)\n    color_list = color_list.reshape((-1, 3)) * 255\n    if not rgb:\n        color_list = color_list[:, ::-1]\n    return color_list\n\n\ndef txt2img(visual_path=\"visual_val_gt\"):\n    print(\"Starting txt2img\")\n\n    valid_labels = {1}\n    ignore_labels = {2, 7, 8, 12}\n\n    if not os.path.exists(visual_path):\n        os.makedirs(visual_path)\n    color_list = colormap()\n\n    gt_json_path = 'datasets/mot/annotations/val_half.json'\n    img_path = 'datasets/mot/train/'\n    show_video_names = ['MOT17-02-FRCNN', \n                    'MOT17-04-FRCNN',\n                    'MOT17-05-FRCNN',\n                    'MOT17-09-FRCNN',\n                
    'MOT17-10-FRCNN',        \n                    'MOT17-11-FRCNN',\n                    'MOT17-13-FRCNN']\n\n\n    test_json_path = 'datasets/mot/annotations/test.json'\n    test_img_path = 'datasets/mot/test/'\n    test_show_video_names = ['MOT17-01-FRCNN', \n                    'MOT17-03-FRCNN',\n                    'MOT17-06-FRCNN',\n                    'MOT17-07-FRCNN',\n                    'MOT17-08-FRCNN',        \n                    'MOT17-12-FRCNN',\n                    'MOT17-14-FRCNN']\n    if visual_path == \"visual_test_predict\":\n        show_video_names = test_show_video_names\n        img_path = test_img_path\n        gt_json_path = test_json_path\n    for show_video_name in show_video_names:\n        img_dict = dict()\n        \n        if visual_path == \"visual_val_gt\":\n            txt_path = 'datasets/mot/train/' + show_video_name + '/gt/gt_val_half.txt'\n        elif visual_path == \"visual_yolox_x\":\n            txt_path = 'YOLOX_outputs/yolox_mot_x_1088/track_results/'+ show_video_name + '.txt'\n        elif visual_path == \"visual_test_predict\":\n            txt_path = 'test/tracks/'+ show_video_name + '.txt'\n        else:\n            raise NotImplementedError\n        \n        with open(gt_json_path, 'r') as f:\n            gt_json = json.load(f)\n\n        for ann in gt_json[\"images\"]:\n            file_name = ann['file_name']\n            video_name = file_name.split('/')[0]\n            if video_name == show_video_name:\n                img_dict[ann['frame_id']] = img_path + file_name\n\n\n        txt_dict = dict()    \n        with open(txt_path, 'r') as f:\n            for line in f.readlines():\n                linelist = line.split(',')\n\n                mark = int(float(linelist[6]))\n                label = int(float(linelist[7]))\n                vis_ratio = float(linelist[8])\n                \n                if visual_path == \"visual_val_gt\":\n                    if mark == 0 or label not in valid_labels or label in ignore_labels or vis_ratio <= 0:\n                        continue\n\n                img_id = linelist[0]\n                obj_id = linelist[1]\n                bbox = [float(linelist[2]), float(linelist[3]), \n                        float(linelist[2]) + float(linelist[4]), \n                        float(linelist[3]) + float(linelist[5]), int(obj_id)]\n                if int(img_id) in txt_dict:\n                    txt_dict[int(img_id)].append(bbox)\n                else:\n                    txt_dict[int(img_id)] = list()\n                    txt_dict[int(img_id)].append(bbox)\n\n        for img_id in sorted(txt_dict.keys()):\n            img = cv2.imread(img_dict[img_id])\n            for bbox in txt_dict[img_id]:\n                cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color_list[bbox[4]%79].tolist(), thickness=2)\n                cv2.putText(img, \"{}\".format(int(bbox[4])), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color_list[bbox[4]%79].tolist(), 2)\n            cv2.imwrite(visual_path + \"/\" + show_video_name + \"{:0>6d}.png\".format(img_id), img)\n        print(show_video_name, \"Done\")\n    print(\"txt2img Done\")\n\n        \ndef img2video(visual_path=\"visual_val_gt\"):\n    print(\"Starting img2video\")\n\n    img_paths = gb.glob(visual_path + \"/*.png\") \n    fps = 16 \n    size = (1920,1080) \n    videowriter = cv2.VideoWriter(visual_path + \"_video.avi\",cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)\n\n    for img_path in 
sorted(img_paths):\n        img = cv2.imread(img_path)\n        img = cv2.resize(img, size)\n        videowriter.write(img)\n\n    videowriter.release()\n    print(\"img2video Done\")\n\n\nif __name__ == '__main__':\n    visual_path = \"visual_yolox_x\"\n    if len(sys.argv) > 1:\n        visual_path = sys.argv[1]\n    txt2img(visual_path)\n    #img2video(visual_path)\n"
  },
  {
    "path": "yolox/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom .utils import configure_module\n\nconfigure_module()\n\n__version__ = \"0.1.0\"\n"
  },
  {
    "path": "yolox/core/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .launch import launch\nfrom .trainer import Trainer\n"
  },
  {
    "path": "yolox/core/launch.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Code are based on\n# https://github.com/facebookresearch/detectron2/blob/master/detectron2/engine/launch.py\n# Copyright (c) Facebook, Inc. and its affiliates.\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom loguru import logger\n\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\n\nimport yolox.utils.dist as comm\nfrom yolox.utils import configure_nccl\n\nimport os\nimport subprocess\nimport sys\nimport time\n\n__all__ = [\"launch\"]\n\n\ndef _find_free_port():\n    \"\"\"\n    Find an available port of current machine / node.\n    \"\"\"\n    import socket\n\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    # Binding to port 0 will cause the OS to find an available port for us\n    sock.bind((\"\", 0))\n    port = sock.getsockname()[1]\n    sock.close()\n    # NOTE: there is still a chance the port could be taken by other processes.\n    return port\n\n\ndef launch(\n    main_func,\n    num_gpus_per_machine,\n    num_machines=1,\n    machine_rank=0,\n    backend=\"nccl\",\n    dist_url=None,\n    args=(),\n):\n    \"\"\"\n    Args:\n        main_func: a function that will be called by `main_func(*args)`\n        num_machines (int): the total number of machines\n        machine_rank (int): the rank of this machine (one per machine)\n        dist_url (str): url to connect to for distributed training, including protocol\n                       e.g. \"tcp://127.0.0.1:8686\".\n                       Can be set to auto to automatically select a free port on localhost\n        args (tuple): arguments passed to main_func\n    \"\"\"\n    world_size = num_machines * num_gpus_per_machine\n    if world_size > 1:\n        if int(os.environ.get(\"WORLD_SIZE\", \"1\")) > 1:\n            dist_url = \"{}:{}\".format(\n                os.environ.get(\"MASTER_ADDR\", None),\n                os.environ.get(\"MASTER_PORT\", \"None\"),\n            )\n            local_rank = int(os.environ.get(\"LOCAL_RANK\", \"0\"))\n            world_size = int(os.environ.get(\"WORLD_SIZE\", \"1\"))\n            _distributed_worker(\n                local_rank,\n                main_func,\n                world_size,\n                num_gpus_per_machine,\n                num_machines,\n                machine_rank,\n                backend,\n                dist_url,\n                args,\n            )\n            exit()\n        launch_by_subprocess(\n            sys.argv,\n            world_size,\n            num_machines,\n            machine_rank,\n            num_gpus_per_machine,\n            dist_url,\n            args,\n        )\n    else:\n        main_func(*args)\n\n\ndef launch_by_subprocess(\n    raw_argv,\n    world_size,\n    num_machines,\n    machine_rank,\n    num_gpus_per_machine,\n    dist_url,\n    args,\n):\n    assert (\n        world_size > 1\n    ), \"subprocess mode doesn't support single GPU, use spawn mode instead\"\n\n    if dist_url is None:\n        # ------------------------hack for multi-machine training -------------------- #\n        if num_machines > 1:\n            master_ip = subprocess.check_output([\"hostname\", \"--fqdn\"]).decode(\"utf-8\")\n            master_ip = str(master_ip).strip()\n            dist_url = \"tcp://{}\".format(master_ip)\n            ip_add_file = \"./\" + args[1].experiment_name + \"_ip_add.txt\"\n            if machine_rank == 0:\n                port = _find_free_port()\n                with open(ip_add_file, \"w\") as ip_add:\n 
                   ip_add.write(dist_url+'\\n')\n                    ip_add.write(str(port))\n            else:\n                while not os.path.exists(ip_add_file):\n                    time.sleep(0.5)\n\n                with open(ip_add_file, \"r\") as ip_add:\n                    dist_url = ip_add.readline().strip()\n                    port = ip_add.readline()\n        else:\n            dist_url = \"tcp://127.0.0.1\"\n            port = _find_free_port()\n\n    # set PyTorch distributed related environmental variables\n    current_env = os.environ.copy()\n    current_env[\"MASTER_ADDR\"] = dist_url\n    current_env[\"MASTER_PORT\"] = str(port)\n    current_env[\"WORLD_SIZE\"] = str(world_size)\n    assert num_gpus_per_machine <= torch.cuda.device_count()\n\n    if \"OMP_NUM_THREADS\" not in os.environ and num_gpus_per_machine > 1:\n        current_env[\"OMP_NUM_THREADS\"] = str(1)\n        logger.info(\n            \"\\n*****************************************\\n\"\n            \"Setting OMP_NUM_THREADS environment variable for each process \"\n            \"to be {} in default, to avoid your system being overloaded, \"\n            \"please further tune the variable for optimal performance in \"\n            \"your application as needed. \\n\"\n            \"*****************************************\".format(\n                current_env[\"OMP_NUM_THREADS\"]\n            )\n        )\n\n    processes = []\n    for local_rank in range(0, num_gpus_per_machine):\n        # each process's rank\n        dist_rank = machine_rank * num_gpus_per_machine + local_rank\n        current_env[\"RANK\"] = str(dist_rank)\n        current_env[\"LOCAL_RANK\"] = str(local_rank)\n\n        # spawn the processes\n        cmd = [\"python3\", *raw_argv]\n\n        process = subprocess.Popen(cmd, env=current_env)\n        processes.append(process)\n\n    for process in processes:\n        process.wait()\n        if process.returncode != 0:\n            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)\n\n\ndef _distributed_worker(\n    local_rank,\n    main_func,\n    world_size,\n    num_gpus_per_machine,\n    num_machines,\n    machine_rank,\n    backend,\n    dist_url,\n    args,\n):\n    assert (\n        torch.cuda.is_available()\n    ), \"cuda is not available. 
Please check your installation.\"\n    configure_nccl()\n    global_rank = machine_rank * num_gpus_per_machine + local_rank\n    logger.info(\"Rank {} initialization finished.\".format(global_rank))\n    try:\n        dist.init_process_group(\n            backend=backend,\n            init_method=dist_url,\n            world_size=world_size,\n            rank=global_rank,\n        )\n    except Exception:\n        logger.error(\"Process group URL: {}\".format(dist_url))\n        raise\n    # synchronize is needed here to prevent a possible timeout after calling init_process_group\n    # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172\n    comm.synchronize()\n\n    if global_rank == 0 and os.path.exists(\n        \"./\" + args[1].experiment_name + \"_ip_add.txt\"\n    ):\n        os.remove(\"./\" + args[1].experiment_name + \"_ip_add.txt\")\n\n    assert num_gpus_per_machine <= torch.cuda.device_count()\n    torch.cuda.set_device(local_rank)\n\n    args[1].local_rank = local_rank\n    args[1].num_machines = num_machines\n\n    # Setup the local process group (which contains ranks within the same machine)\n    # assert comm._LOCAL_PROCESS_GROUP is None\n    # num_machines = world_size // num_gpus_per_machine\n    # for i in range(num_machines):\n    # ranks_on_i = list(range(i * num_gpus_per_machine, (i + 1) * num_gpus_per_machine))\n    # pg = dist.new_group(ranks_on_i)\n    # if i == machine_rank:\n    # comm._LOCAL_PROCESS_GROUP = pg\n\n    main_func(*args)\n"
  },
  {
    "path": "yolox/core/trainer.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom loguru import logger\n\nimport torch\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom tensorboardX import SummaryWriter\n\nfrom yolox.data import DataPrefetcher\nfrom yolox.utils import (\n    MeterBuffer,\n    ModelEMA,\n    all_reduce_norm,\n    get_model_info,\n    get_rank,\n    get_world_size,\n    gpu_mem_usage,\n    load_ckpt,\n    occupy_mem,\n    save_checkpoint,\n    setup_logger,\n    synchronize,\n)\n\nimport datetime\nimport os\nimport time\n\n\nclass Trainer:\n    def __init__(self, exp, args):\n        # init function only defines some basic attr, other attrs like model, optimizer are built in\n        # before_train methods.\n        self.exp = exp\n        self.args = args\n\n        # training related attr\n        self.task=exp.task\n        self.max_epoch = exp.max_epoch\n        self.amp_training = args.fp16\n        self.scaler = torch.cuda.amp.GradScaler(enabled=args.fp16)\n        self.is_distributed = get_world_size() > 1\n        self.rank = get_rank()\n        self.local_rank = args.local_rank\n        self.device = \"cuda:{}\".format(self.local_rank)\n        self.use_model_ema = exp.ema\n\n        # data/dataloader related attr\n        self.data_type = torch.float16 if args.fp16 else torch.float32\n        self.input_size = exp.input_size\n        self.random_flip=exp.random_flip\n        self.best_ap = 0\n\n        # metric record\n        self.meter = MeterBuffer(window_size=exp.print_interval)\n        self.file_name = os.path.join(exp.output_dir, args.experiment_name)\n\n        if self.rank == 0:\n            os.makedirs(self.file_name, exist_ok=True)\n\n        setup_logger(\n            self.file_name,\n            distributed_rank=self.rank,\n            filename=\"train_log.txt\",\n            mode=\"a\",\n        )\n\n    def train(self):\n        self.before_train()\n        try:\n            self.train_in_epoch()\n        except Exception:\n            raise\n        finally:\n            self.after_train()\n\n    def train_in_epoch(self):\n        for self.epoch in range(self.start_epoch, self.max_epoch):\n            self.before_epoch()\n            self.train_in_iter()\n            self.after_epoch()\n\n    def train_in_iter(self):\n        for self.iter in range(self.max_iter):\n            self.before_iter()\n            self.train_one_iter()\n            self.after_iter()\n\n    def train_one_iter(self):\n        iter_start_time = time.time()\n        pre_inps, pre_targets,cur_inps,cur_targets= self.prefetcher.next()\n        pre_inps = pre_inps.to(self.data_type)\n        pre_targets = pre_targets[:,:,:5].to(self.data_type)\n        pre_targets.requires_grad = False\n        if self.task==\"tracking\":\n            cur_inps = cur_inps.to(self.data_type)\n            cur_targets = cur_targets[:,:,:5].to(self.data_type)\n            cur_targets.requires_grad = False\n\n        data_end_time = time.time()\n        inps,targets=(pre_inps,cur_inps),(pre_targets,cur_targets)\n        with torch.cuda.amp.autocast(enabled=self.amp_training):\n            outputs = self.model(inps,targets,self.random_flip,self.input_size)\n        loss = outputs[\"total_loss\"]\n\n        self.optimizer.zero_grad()\n        self.scaler.scale(loss).backward()\n        self.scaler.step(self.optimizer)\n        self.scaler.update()\n\n        if self.use_model_ema:\n            self.ema_model.update(self.model)\n\n        lr = 
self.lr_scheduler.update_lr(self.progress_in_iter + 1)\n        for param_group in self.optimizer.param_groups:\n            param_group[\"lr\"] = lr\n\n        iter_end_time = time.time()\n        self.meter.update(\n            iter_time=iter_end_time - iter_start_time,\n            data_time=data_end_time - iter_start_time,\n            lr=lr,\n            **outputs,\n        )\n\n    def before_train(self):\n        logger.info(\"args: {}\".format(self.args))\n        logger.info(\"exp value:\\n{}\".format(self.exp))\n\n        # model related init\n        torch.cuda.set_device(self.local_rank)\n        model = self.exp.get_model()\n        # logger.info(\n        #     \"Model Summary: {}\".format(get_model_info(model, self.exp.test_size))\n        # )\n        model.to(self.device)\n\n        # solver related init\n        self.optimizer = self.exp.get_optimizer(self.args.batch_size)\n\n        # value of epoch will be set in `resume_train`\n        model = self.resume_train(model)\n\n        # data related init\n        self.no_aug = self.start_epoch >= self.max_epoch - self.exp.no_aug_epochs\n        self.train_loader = self.exp.get_data_loader(\n            batch_size=self.args.batch_size,\n            is_distributed=self.is_distributed,\n            no_aug=self.no_aug,\n        )\n        logger.info(\"init prefetcher, this might take one minute or less...\")\n        self.prefetcher = DataPrefetcher(self.train_loader,self.task)\n        # max_iter means iters per epoch\n        self.max_iter = len(self.train_loader)\n\n        self.lr_scheduler = self.exp.get_lr_scheduler(\n            self.exp.basic_lr_per_img * self.args.batch_size, self.max_iter\n        )\n        if self.args.occupy:\n            occupy_mem(self.local_rank)\n\n        if self.is_distributed:\n            model = DDP(model, device_ids=[self.local_rank], broadcast_buffers=False,find_unused_parameters=False)\n\n        if self.use_model_ema:\n            self.ema_model = ModelEMA(model, 0.9998)\n            self.ema_model.updates = self.max_iter * self.start_epoch\n\n        self.model = model\n        self.model.train()\n\n        self.evaluator = self.exp.get_evaluator(\n            batch_size=self.args.batch_size, is_distributed=self.is_distributed\n        )\n        # Tensorboard logger\n        if self.rank == 0:\n            self.tblogger = SummaryWriter(self.file_name)\n\n        logger.info(\"Training start...\")\n        #logger.info(\"\\n{}\".format(model))\n\n    def after_train(self):\n        logger.info(\n            \"Training of experiment is done and the best AP is {:.2f}\".format(\n                self.best_ap * 100\n            )\n        )\n\n    def before_epoch(self):\n        logger.info(\"---> start train epoch{}\".format(self.epoch + 1))\n\n        if self.epoch + 1 == self.max_epoch - self.exp.no_aug_epochs or self.no_aug:\n            \n            logger.info(\"--->No mosaic aug now!\")\n            self.train_loader.close_mosaic()\n            logger.info(\"--->Add additional L1 loss now!\")\n            if self.is_distributed:\n                self.model.module.head.use_l1 = True\n            else:\n                self.model.head.use_l1 = True\n            \n            self.exp.eval_interval = 1\n            if not self.no_aug:\n                self.save_ckpt(ckpt_name=\"last_mosaic_epoch\")\n\n    def after_epoch(self):\n        if self.use_model_ema:\n            self.ema_model.update_attr(self.model)\n\n        self.save_ckpt(ckpt_name=\"latest\")\n        if (self.epoch + 
1) % 10 == 0:\n            self.save_ckpt(ckpt_name=\"epoch_{}\".format(self.epoch+1))\n        if (self.epoch + 1) % self.exp.eval_interval == 0: \n            all_reduce_norm(self.model)\n            self.evaluate_and_save_model() \n\n    def before_iter(self):\n        pass\n\n    def after_iter(self):\n        \"\"\"\n        `after_iter` contains two parts of logic:\n            * log information\n            * reset setting of resize\n        \"\"\"\n        # log needed information\n        # (self.iter + 1) % self.exp.print_interval == 0 and\n        if (self.iter + 1) % self.exp.print_interval == 0:\n            # TODO check ETA logic\n            left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1)\n            eta_seconds = self.meter[\"iter_time\"].global_avg * left_iters\n            eta_str = \"ETA: {}\".format(datetime.timedelta(seconds=int(eta_seconds)))\n\n            progress_str = \"epoch: {}/{}, iter: {}/{}\".format(\n                self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter\n            )\n            loss_meter = self.meter.get_filtered_meter(\"loss\")\n            loss_str = \", \".join(\n                [\"{}: {:.3f}\".format(k, v.latest) for k, v in loss_meter.items()]\n            )\n\n            time_meter = self.meter.get_filtered_meter(\"time\")\n            time_str = \", \".join(\n                [\"{}: {:.3f}s\".format(k, v.avg) for k, v in time_meter.items()]\n            )\n\n            logger.info(\n                \"{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}\".format(\n                    progress_str,\n                    gpu_mem_usage(),\n                    time_str,\n                    loss_str,\n                    self.meter[\"lr\"].latest,\n                )\n                + (\", size: {:d}, {}\".format(self.input_size[0], eta_str))\n            )\n            self.meter.clear_meters()\n\n        # random resizing\n        if self.exp.random_size is not None and (self.progress_in_iter + 1) % 10 == 0:\n            self.input_size = self.exp.random_resize(\n                self.train_loader, self.epoch, self.rank, self.is_distributed\n            )\n\n    @property\n    def progress_in_iter(self):\n        return self.epoch * self.max_iter + self.iter\n\n    def resume_train(self, model):\n        if self.args.resume:\n            logger.info(\"resume training\")\n            if self.args.ckpt is None:\n                ckpt_file = os.path.join(self.file_name, \"latest\" + \"_ckpt.pth.tar\")\n            else:\n                ckpt_file = self.args.ckpt\n\n            ckpt = torch.load(ckpt_file, map_location=self.device)\n            # resume the model/optimizer state dict\n            model.load_state_dict(ckpt[\"model\"])\n            self.optimizer.load_state_dict(ckpt[\"optimizer\"])\n            start_epoch = (\n                self.args.start_epoch - 1\n                if self.args.start_epoch is not None\n                else ckpt[\"start_epoch\"]\n            )\n            self.start_epoch = start_epoch\n            logger.info(\n                \"loaded checkpoint '{}' (epoch {})\".format(\n                    self.args.resume, self.start_epoch\n                )\n            )  # noqa\n        else:\n            if self.args.ckpt is not None:\n                logger.info(\"loading checkpoint for fine tuning\")\n                ckpt_file = self.args.ckpt\n                ckpt = torch.load(ckpt_file, map_location=self.device)[\"model\"]\n                model = load_ckpt(model, ckpt)\n            
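# fresh run (no resume): epoch counting always starts from 0\n            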
self.start_epoch = 0\n\n        return model\n\n    def evaluate_and_save_model(self):\n        evalmodel = self.ema_model.ema if self.use_model_ema else self.model\n        ap50_95, ap50, summary = self.exp.eval(\n            evalmodel, self.evaluator, self.is_distributed\n        )\n        self.model.train()\n        if self.rank == 0:\n            self.tblogger.add_scalar(\"val/COCOAP50\", ap50, self.epoch + 1)\n            self.tblogger.add_scalar(\"val/COCOAP50_95\", ap50_95, self.epoch + 1)\n            logger.info(\"\\n\" + summary)\n        synchronize()\n\n        # decide whether this is a new best BEFORE updating the running best,\n        # and use AP50:95 consistently for both the comparison and the update\n        update_best_ckpt = ap50_95 > self.best_ap\n        self.best_ap = max(self.best_ap, ap50_95)\n        self.save_ckpt(\"last_epoch\", update_best_ckpt)\n\n    def save_ckpt(self, ckpt_name, update_best_ckpt=False):\n        if self.rank == 0:\n            save_model = self.ema_model.ema if self.use_model_ema else self.model\n            logger.info(\"Save weights to {}\".format(self.file_name))\n            ckpt_state = {\n                \"start_epoch\": self.epoch + 1,\n                \"model\": save_model.state_dict(),\n                \"optimizer\": self.optimizer.state_dict(),\n            }\n            save_checkpoint(\n                ckpt_state,\n                update_best_ckpt,\n                self.file_name,\n                ckpt_name,\n            )\n
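\n\n# Resume sketch (illustrative, not invoked by the pipeline): the ckpt_state saved\n# above can be reloaded manually; the \"latest\" + \"_ckpt.pth.tar\" file name follows\n# the convention used in `resume_train`.\n#\n#   ckpt = torch.load(\"latest_ckpt.pth.tar\", map_location=\"cpu\")\n#   model.load_state_dict(ckpt[\"model\"])\n#   optimizer.load_state_dict(ckpt[\"optimizer\"])\n#   start_epoch = ckpt[\"start_epoch\"]\n"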
  },
  {
    "path": "yolox/data/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .data_augment import TrainTransform, ValTransform,DiffusionValTransform,DiffusionTrainTransform\nfrom .data_prefetcher import DataPrefetcher\nfrom .dataloading import DataLoader, get_yolox_datadir\nfrom .datasets import *\nfrom .samplers import InfiniteSampler, YoloBatchSampler\n"
  },
  {
    "path": "yolox/data/data_augment.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\"\"\"\nData augmentation functionality. Passed as callable transformations to\nDataset classes.\n\nThe data augmentation procedures were interpreted from @weiliu89's SSD paper\nhttp://arxiv.org/abs/1512.02325\n\"\"\"\n\nimport cv2\nimport numpy as np\n\nimport torch\n\nfrom yolox.utils import xyxy2cxcywh\n\nimport math\nimport random\n\n\ndef augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4):\n    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains\n    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))\n    dtype = img.dtype  # uint8\n\n    x = np.arange(0, 256, dtype=np.int16)\n    lut_hue = ((x * r[0]) % 180).astype(dtype)\n    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n    img_hsv = cv2.merge(\n        (cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))\n    ).astype(dtype)\n    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed\n\n\ndef box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):\n    # box1(4,n), box2(4,n)\n    # Compute candidate boxes which include follwing 5 things:\n    # box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio\n    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]\n    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]\n    ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))  # aspect ratio\n    return (\n        (w2 > wh_thr)\n        & (h2 > wh_thr)\n        & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr)\n        & (ar < ar_thr)\n    )  # candidates\n\n\ndef random_perspective(\n    img,\n    targets=(),\n    degrees=10,\n    translate=0.1,\n    scale=0.1,\n    shear=10,\n    perspective=0.0,\n    border=(0, 0),\n):\n    # targets = [cls, xyxy]\n    height = img.shape[0] + border[0] * 2  # shape(h,w,c)\n    width = img.shape[1] + border[1] * 2\n\n    # Center\n    C = np.eye(3)\n    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)\n    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)\n\n    # Rotation and Scale\n    R = np.eye(3)\n    a = random.uniform(-degrees, degrees)\n    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations\n    s = random.uniform(scale[0], scale[1])\n    # s = 2 ** random.uniform(-scale, scale)\n    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n    # Shear\n    S = np.eye(3)\n    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)\n    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)\n\n    # Translation\n    T = np.eye(3)\n    T[0, 2] = (\n        random.uniform(0.5 - translate, 0.5 + translate) * width\n    )  # x translation (pixels)\n    T[1, 2] = (\n        random.uniform(0.5 - translate, 0.5 + translate) * height\n    )  # y translation (pixels)\n\n    # Combined rotation matrix\n    M = T @ S @ R @ C  # order of operations (right to left) is IMPORTANT\n\n    ###########################\n    # For Aug out of Mosaic\n    # s = 1.\n    # M = np.eye(3)\n    ###########################\n\n    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed\n        if perspective:\n            img = cv2.warpPerspective(\n                img, M, dsize=(width, height), borderValue=(114, 114, 114)\n            )\n        else:  # affine\n            img = cv2.warpAffine(\n                img, M[:2], 
dsize=(width, height), borderValue=(114, 114, 114)\n            )\n\n    # Transform label coordinates\n    n = len(targets)\n    if n:\n        # warp points\n        xy = np.ones((n * 4, 3))\n        xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(\n            n * 4, 2\n        )  # x1y1, x2y2, x1y2, x2y1\n        xy = xy @ M.T  # transform\n        if perspective:\n            xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale\n        else:  # affine\n            xy = xy[:, :2].reshape(n, 8)\n\n        # create new boxes\n        x = xy[:, [0, 2, 4, 6]]\n        y = xy[:, [1, 3, 5, 7]]\n        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T\n\n        # clip boxes\n        #xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)\n        #xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)\n\n        # filter candidates\n        i = box_candidates(box1=targets[:, :4].T * s, box2=xy.T)\n        targets = targets[i]\n        targets[:, :4] = xy[i]\n        \n        targets = targets[targets[:, 0] < width]\n        targets = targets[targets[:, 2] > 0]\n        targets = targets[targets[:, 1] < height]\n        targets = targets[targets[:, 3] > 0]\n        \n    return img, targets\n\n\ndef _distort(image):\n    def _convert(image, alpha=1, beta=0):\n        tmp = image.astype(float) * alpha + beta\n        tmp[tmp < 0] = 0\n        tmp[tmp > 255] = 255\n        image[:] = tmp\n\n    image = image.copy()\n\n    if random.randrange(2):\n        _convert(image, beta=random.uniform(-32, 32))\n\n    if random.randrange(2):\n        _convert(image, alpha=random.uniform(0.5, 1.5))\n\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n    if random.randrange(2):\n        tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)\n        tmp %= 180\n        image[:, :, 0] = tmp\n\n    if random.randrange(2):\n        _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))\n\n    image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n\n    return image\n\n\ndef _mirror(image, boxes):\n    _, width, _ = image.shape\n    if random.randrange(2):\n        image = image[:, ::-1]\n        boxes = boxes.copy()\n        boxes[:, 0::2] = width - boxes[:, 2::-2]\n    return image, boxes\n\n\ndef preproc(image, input_size, mean, std, swap=(2, 0, 1)):\n    if len(image.shape) == 3:\n        padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0\n    else:\n        padded_img = np.ones(input_size) * 114.0\n    img = np.array(image)\n    r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])\n    resized_img = cv2.resize(\n        img,\n        (int(img.shape[1] * r), int(img.shape[0] * r)),\n        interpolation=cv2.INTER_LINEAR,\n    ).astype(np.float32)\n    padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img\n\n    padded_img = padded_img[:, :, ::-1]\n    padded_img /= 255.0\n    if mean is not None:\n        padded_img -= mean\n    if std is not None:\n        padded_img /= std\n    padded_img = padded_img.transpose(swap)\n    padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)\n    return padded_img, r\n\n\nclass TrainTransform:\n    def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):\n        self.means = rgb_means\n        self.std = std\n        self.p = p\n        self.max_labels = max_labels\n\n    def __call__(self, image, targets, input_dim):\n        boxes = targets[:, :4].copy()\n        labels = targets[:, 4].copy()\n        ids = targets[:, 5].copy()\n        if len(boxes) == 0:\n            
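# no boxes in this image: return the preprocessed image with an all-zero\n            # (max_labels, 6) label pad so batch shapes stay fixed\n            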
targets = np.zeros((self.max_labels, 6), dtype=np.float32)\n            image, r_o = preproc(image, input_dim, self.means, self.std)\n            image = np.ascontiguousarray(image, dtype=np.float32)\n            return image, targets\n\n        image_o = image.copy()\n        targets_o = targets.copy()\n        height_o, width_o, _ = image_o.shape\n        boxes_o = targets_o[:, :4]\n        labels_o = targets_o[:, 4]\n        ids_o = targets_o[:, 5]\n        # bbox_o: [xyxy] to [c_x,c_y,w,h]\n        boxes_o = xyxy2cxcywh(boxes_o)\n\n        image_t = _distort(image)\n        image_t, boxes = _mirror(image_t, boxes)\n        height, width, _ = image_t.shape\n        image_t, r_ = preproc(image_t, input_dim, self.means, self.std)\n        # boxes [xyxy] 2 [cx,cy,w,h]\n        boxes = xyxy2cxcywh(boxes)\n        boxes *= r_\n\n        mask_b = np.minimum(boxes[:, 2], boxes[:, 3]) > 1\n        boxes_t = boxes[mask_b]\n        labels_t = labels[mask_b]\n        ids_t = ids[mask_b]\n\n        if len(boxes_t) == 0:\n            image_t, r_o = preproc(image_o, input_dim, self.means, self.std)\n            boxes_o *= r_o\n            boxes_t = boxes_o\n            labels_t = labels_o\n            ids_t = ids_o\n\n        labels_t = np.expand_dims(labels_t, 1)\n        ids_t = np.expand_dims(ids_t, 1)\n\n        targets_t = np.hstack((labels_t, boxes_t, ids_t))\n        padded_labels = np.zeros((self.max_labels, 6))\n        padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[\n            : self.max_labels\n        ]\n        padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32)\n        image_t = np.ascontiguousarray(image_t, dtype=np.float32)\n        return image_t, padded_labels\n\nclass DiffusionValTransform:\n    def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):\n        self.means = rgb_means\n        self.std = std\n        self.p = p\n        self.max_labels = max_labels\n\n    def __call__(self, image, targets, input_dim):\n        if len(targets) == 0:\n            targets = np.zeros((self.max_labels, 6), dtype=np.float32)\n            image, r_o = preproc(image, input_dim, self.means, self.std)\n            image = np.ascontiguousarray(image, dtype=np.float32)\n            return image, targets\n\n        image_o = image.copy()\n        targets_o = targets.copy()\n\n        boxes_o = targets_o[:, :4]\n        labels_o = targets_o[:, 4]\n        ids_o = targets_o[:, 5]\n        # bbox_o: [xyxy] to [c_x,c_y,w,h]\n        boxes_o = xyxy2cxcywh(boxes_o)\n\n        image_t, r_o = preproc(image_o, input_dim, self.means, self.std)\n        boxes_o *= r_o\n        boxes_t = boxes_o\n        labels_t = labels_o\n        ids_t = ids_o\n\n        labels_t = np.expand_dims(labels_t, 1)\n        ids_t = np.expand_dims(ids_t, 1)\n\n        targets_t = np.hstack((labels_t, boxes_t, ids_t))\n        padded_labels = np.zeros((self.max_labels, 6))\n        padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[\n            : self.max_labels\n        ]\n        padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32)\n        image_t = np.ascontiguousarray(image_t, dtype=np.float32)\n        return image_t, padded_labels\n\nclass DiffusionTrainTransform:\n    def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):\n        self.means = rgb_means\n        self.std = std\n        self.p = p\n        self.max_labels = max_labels\n\n    def __call__(self, ref_image, ref_targets, track_image, 
track_targets,input_dim):\n        if len(ref_targets) == 0:\n            ref_targets_t = np.zeros((self.max_labels, 6), dtype=np.float32)\n            ref_image_t, r_o = preproc(ref_image, input_dim, self.means, self.std)\n            ref_image_t = np.ascontiguousarray(ref_image_t, dtype=np.float32)\n            track_targets_t = np.zeros((self.max_labels, 6), dtype=np.float32)\n            track_image_t, r_o = preproc(track_image, input_dim, self.means, self.std)\n            track_image_t = np.ascontiguousarray(track_image_t, dtype=np.float32)\n            return ref_image_t, ref_targets_t,track_image_t,track_targets_t\n\n        ref_image_o = ref_image.copy()\n        ref_targets_o = ref_targets.copy()\n\n        ref_boxes_o = ref_targets_o[:, :4]\n        ref_labels_o = ref_targets_o[:, 4]\n        ref_ids_o = ref_targets_o[:, 5]\n        # bbox_o: [xyxy] to [c_x,c_y,w,h]\n        ref_boxes_o = xyxy2cxcywh(ref_boxes_o)\n\n        ref_image_t, ref_r_o = preproc(ref_image_o, input_dim, self.means, self.std)\n        ref_boxes_o *= ref_r_o\n        ref_boxes_t = ref_boxes_o\n        ref_labels_t = ref_labels_o\n        ref_ids_t = ref_ids_o\n\n        ref_labels_t = np.expand_dims(ref_labels_t, 1)\n        ref_ids_t = np.expand_dims(ref_ids_t, 1)\n\n        ref_targets_t = np.hstack((ref_labels_t, ref_boxes_t, ref_ids_t))\n\n        track_image_o = track_image.copy()\n        track_targets_o = track_targets.copy()\n\n        track_boxes_o = track_targets_o[:, :4]\n        track_labels_o = track_targets_o[:, 4]\n        track_ids_o = track_targets_o[:, 5]\n        # bbox_o: [xyxy] to [c_x,c_y,w,h]\n        track_boxes_o = xyxy2cxcywh(track_boxes_o)\n\n        track_image_t, track_r_o = preproc(track_image_o, input_dim, self.means, self.std)\n        track_boxes_o *= track_r_o\n        track_boxes_t = track_boxes_o\n        track_labels_t = track_labels_o\n        track_ids_t = track_ids_o\n\n        track_labels_t = np.expand_dims(track_labels_t, 1)\n        track_ids_t = np.expand_dims(track_ids_t, 1)\n\n        track_targets_t = np.hstack((track_labels_t, track_boxes_t, track_ids_t))\n\n        ref_padded_labels = np.zeros((self.max_labels, 6))\n        track_padded_labels = np.zeros((self.max_labels, 6))\n        \n        pair_indices=np.argwhere((ref_targets_t[:,5].reshape(-1,1)==track_targets_t[:,5].reshape(1,-1))>0)\n        ref_targets_t=ref_targets_t[pair_indices[:,0]]\n        track_targets_t=track_targets_t[pair_indices[:,1]]\n        ref_padded_labels[range(len(ref_targets_t))[: self.max_labels]] = ref_targets_t[\n            : self.max_labels\n        ]\n        ref_padded_labels = np.ascontiguousarray(ref_padded_labels, dtype=np.float32)\n        ref_image_t = np.ascontiguousarray(ref_image_t, dtype=np.float32)\n\n        track_padded_labels[range(len(track_targets_t))[: self.max_labels]] = track_targets_t[\n            : self.max_labels\n        ]\n        track_padded_labels = np.ascontiguousarray(track_padded_labels, dtype=np.float32)\n        track_image_t = np.ascontiguousarray(track_image_t, dtype=np.float32)\n\n        return ref_image_t,ref_padded_labels,track_image_t,track_padded_labels\n    \nclass ValTransform:\n    \"\"\"\n    Defines the transformations that should be applied to test PIL image\n    for input into the network\n\n    dimension -> tensorize -> color adj\n\n    Arguments:\n        resize (int): input dimension to SSD\n        rgb_means ((int,int,int)): average RGB of the dataset\n            (104,117,123)\n        swap ((int,int,int)): final order 
of channels\n\n    Returns:\n        transform (transform): callable transform to be applied to test/val\n        data\n    \"\"\"\n\n    def __init__(self, rgb_means=None, std=None, swap=(2, 0, 1)):\n        self.means = rgb_means\n        self.swap = swap\n        self.std = std\n\n    # assume input is cv2 img for now\n    def __call__(self, img, res, input_size):\n        img, _ = preproc(img, input_size, self.means, self.std, self.swap)\n        return img, np.zeros((1, 5))\n
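\n\n# Usage sketch (illustrative; the file name and the mean/std constants below are\n# assumptions, not values fixed by this module):\n#\n#   transform = ValTransform(rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n#   img, _ = transform(cv2.imread(\"frame.jpg\"), None, (800, 1440))\n#   # `img` is a float32 CHW array, letterbox-resized and normalized by preproc()\n"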
  },
  {
    "path": "yolox/data/data_prefetcher.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nimport torch.distributed as dist\n\nfrom yolox.utils import synchronize\n\nimport random\n\n\nclass DataPrefetcher:\n    \"\"\"\n    DataPrefetcher is inspired by code of following file:\n    https://github.com/NVIDIA/apex/blob/master/examples/imagenet/main_amp.py\n    It could speedup your pytorch dataloader. For more information, please check\n    https://github.com/NVIDIA/apex/issues/304#issuecomment-493562789.\n    \"\"\"\n\n    def __init__(self, loader,task):\n        self.loader = iter(loader)\n        self.task=task\n        self.stream = torch.cuda.Stream()\n        self.record_stream = DataPrefetcher._record_stream_for_image\n        self.preload()\n\n    def preload(self):\n        try:\n            if self.task==\"tracking\":\n                self.next_input_pre, self.next_target_pre,self.next_input_cur, self.next_target_cur,_, _ = next(self.loader)\n            else:\n                self.next_input_pre, self.next_target_pre, _, _ = next(self.loader)\n        except StopIteration:\n            self.next_input_pre = None\n            self.next_target_pre = None\n            if self.task==\"tracking\":\n                self.next_input_cur = None\n                self.next_target_cur = None\n            return\n\n        with torch.cuda.stream(self.stream):\n            self.next_input_pre = self.next_input_pre.cuda(non_blocking=True)\n            self.next_target_pre = self.next_target_pre.cuda(non_blocking=True)\n            if self.task==\"tracking\":\n                self.next_input_cur = self.next_input_cur.cuda(non_blocking=True)\n                self.next_target_cur = self.next_target_cur.cuda(non_blocking=True)\n                \n\n    def next(self):\n        torch.cuda.current_stream().wait_stream(self.stream)\n        input_pre = self.next_input_pre\n        target_pre = self.next_target_pre\n        input_cur = None\n        target_cur = None\n        if self.task==\"tracking\":\n            input_cur = self.next_input_cur\n            target_cur = self.next_target_cur\n        if input_pre is not None:\n            self.record_stream(input_pre)\n        if target_pre is not None:\n            target_pre.record_stream(torch.cuda.current_stream())\n        if self.task==\"tracking\":\n            if input_cur is not None:\n                self.record_stream(input_cur)\n            if target_cur is not None:\n                target_cur.record_stream(torch.cuda.current_stream())\n        self.preload()\n        return input_pre,target_pre,input_cur,target_cur\n        \n\n    @staticmethod\n    def _record_stream_for_image(input):\n        input.record_stream(torch.cuda.current_stream())\n\n\ndef random_resize(data_loader, exp, epoch, rank, is_distributed):\n    tensor = torch.LongTensor(1).cuda()\n    if is_distributed:\n        synchronize()\n\n    if rank == 0:\n        if epoch > exp.max_epoch - 10:\n            size = exp.input_size\n        else:\n            size = random.randint(*exp.random_size)\n            size = int(32 * size)\n        tensor.fill_(size)\n\n    if is_distributed:\n        synchronize()\n        dist.broadcast(tensor, 0)\n\n    input_size = data_loader.change_input_dim(multiple=tensor.item(), random_range=None)\n    return "
  },
  {
    "path": "yolox/data/dataloading.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nfrom torch.utils.data.dataloader import DataLoader as torchDataLoader\nfrom torch.utils.data.dataloader import default_collate\n\nimport os\nimport random\n\nfrom .samplers import YoloBatchSampler\n\n\ndef get_yolox_datadir():\n    \"\"\"\n    get dataset dir of YOLOX. If environment variable named `YOLOX_DATADIR` is set,\n    this function will return value of the environment variable. Otherwise, use data\n    \"\"\"\n    yolox_datadir = os.getenv(\"YOLOX_DATADIR\", None)\n    if yolox_datadir is None:\n        import yolox\n\n        yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))\n        yolox_datadir = os.path.join(yolox_path, \"datasets\")\n    return yolox_datadir\n\n\nclass DataLoader(torchDataLoader):\n    \"\"\"\n    Lightnet dataloader that enables on the fly resizing of the images.\n    See :class:`torch.utils.data.DataLoader` for more information on the arguments.\n    Check more on the following website:\n    https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py\n\n    Note:\n        This dataloader only works with :class:`lightnet.data.Dataset` based datasets.\n\n    Example:\n        >>> class CustomSet(ln.data.Dataset):\n        ...     def __len__(self):\n        ...         return 4\n        ...     @ln.data.Dataset.resize_getitem\n        ...     def __getitem__(self, index):\n        ...         # Should return (image, anno) but here we return (input_dim,)\n        ...         return (self.input_dim,)\n        >>> dl = ln.data.DataLoader(\n        ...     CustomSet((200,200)),\n        ...     batch_size = 2,\n        ...     collate_fn = ln.data.list_collate   # We want the data to be grouped as a list\n        ... )\n        >>> dl.dataset.input_dim    # Default input_dim\n        (200, 200)\n        >>> for d in dl:\n        ...     d\n        [[(200, 200), (200, 200)]]\n        [[(200, 200), (200, 200)]]\n        >>> dl.change_input_dim(320, random_range=None)\n        (320, 320)\n        >>> for d in dl:\n        ...     d\n        [[(320, 320), (320, 320)]]\n        [[(320, 320), (320, 320)]]\n        >>> dl.change_input_dim((480, 320), random_range=None)\n        (480, 320)\n        >>> for d in dl:\n        ...     
d\n        [[(480, 320), (480, 320)]]\n        [[(480, 320), (480, 320)]]\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.__initialized = False\n        shuffle = False\n        batch_sampler = None\n        if len(args) > 5:\n            shuffle = args[2]\n            sampler = args[3]\n            batch_sampler = args[4]\n        elif len(args) > 4:\n            shuffle = args[2]\n            sampler = args[3]\n            if \"batch_sampler\" in kwargs:\n                batch_sampler = kwargs[\"batch_sampler\"]\n        elif len(args) > 3:\n            shuffle = args[2]\n            if \"sampler\" in kwargs:\n                sampler = kwargs[\"sampler\"]\n            if \"batch_sampler\" in kwargs:\n                batch_sampler = kwargs[\"batch_sampler\"]\n        else:\n            if \"shuffle\" in kwargs:\n                shuffle = kwargs[\"shuffle\"]\n            if \"sampler\" in kwargs:\n                sampler = kwargs[\"sampler\"]\n            if \"batch_sampler\" in kwargs:\n                batch_sampler = kwargs[\"batch_sampler\"]\n\n        # Use custom BatchSampler\n        if batch_sampler is None:\n            if sampler is None:\n                if shuffle:\n                    sampler = torch.utils.data.sampler.RandomSampler(self.dataset)\n                    # sampler = torch.utils.data.DistributedSampler(self.dataset)\n                else:\n                    sampler = torch.utils.data.sampler.SequentialSampler(self.dataset)\n            batch_sampler = YoloBatchSampler(\n                sampler,\n                self.batch_size,\n                self.drop_last,\n                input_dimension=self.dataset.input_dim,\n            )\n            # batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations =\n\n        self.batch_sampler = batch_sampler\n\n        self.__initialized = True\n\n    def close_mosaic(self):\n        self.batch_sampler.mosaic = False\n\n    def change_input_dim(self, multiple=32, random_range=(10, 19)):\n        \"\"\"This function will compute a new size and update it on the next mini_batch.\n\n        Args:\n            multiple (int or tuple, optional): values to multiply the randomly generated range by.\n                Default **32**\n            random_range (tuple, optional): This (min, max) tuple sets the range\n                for the randomisation; Default **(10, 19)**\n\n        Return:\n            tuple: width, height tuple with new dimension\n\n        Note:\n            The new size is generated as follows: |br|\n            First we compute a random integer inside ``[random_range]``.\n            We then multiply that number with the ``multiple`` argument,\n            which gives our final new input size. |br|\n            If ``multiple`` is an integer we generate a square size. If you give a tuple\n            of **(width, height)**, the size is computed\n            as :math:`rng * multiple[0], rng * multiple[1]`.\n\n        Note:\n            You can set the ``random_range`` argument to **None** to set\n            an exact size of multiply. 
|br|\n            See the example above for how this works.\n        \"\"\"\n        if random_range is None:\n            size = 1\n        else:\n            size = random.randint(*random_range)\n\n        if isinstance(multiple, int):\n            size = (size * multiple, size * multiple)\n        else:\n            size = (size * multiple[0], size * multiple[1])\n\n        self.batch_sampler.new_input_dim = size\n\n        return size\n\n\ndef list_collate(batch):\n    \"\"\"\n    Function that collates lists or tuples together into one list (of lists/tuples).\n    Use this as the collate function in a Dataloader, if you want to have a list of\n    items as an output, as opposed to tensors (eg. Brambox.boxes).\n    \"\"\"\n    items = list(zip(*batch))\n\n    for i in range(len(items)):\n        if isinstance(items[i][0], (list, tuple)):\n            items[i] = list(items[i])\n        else:\n            items[i] = default_collate(items[i])\n\n    return items\n"
  },
  {
    "path": "yolox/data/samplers.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nimport torch.distributed as dist\nfrom torch.utils.data.sampler import BatchSampler as torchBatchSampler\nfrom torch.utils.data.sampler import Sampler\n\nimport itertools\nfrom typing import Optional\n\n\nclass YoloBatchSampler(torchBatchSampler):\n    \"\"\"\n    This batch sampler will generate mini-batches of (dim, index) tuples from another sampler.\n    It works just like the :class:`torch.utils.data.sampler.BatchSampler`,\n    but it will prepend a dimension, whilst ensuring it stays the same across one mini-batch.\n    \"\"\"\n\n    def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.input_dim = input_dimension\n        self.new_input_dim = None\n        self.mosaic = mosaic\n\n    def __iter__(self):\n        self.__set_input_dim()\n        for batch in super().__iter__():\n            yield [(self.input_dim, idx, self.mosaic) for idx in batch]\n            self.__set_input_dim()\n\n    def __set_input_dim(self):\n        \"\"\" This function randomly changes the the input dimension of the dataset. \"\"\"\n        if self.new_input_dim is not None:\n            self.input_dim = (self.new_input_dim[0], self.new_input_dim[1])\n            self.new_input_dim = None\n\n\nclass InfiniteSampler(Sampler):\n    \"\"\"\n    In training, we only care about the \"infinite stream\" of training data.\n    So this sampler produces an infinite stream of indices and\n    all workers cooperate to correctly shuffle the indices and sample different indices.\n    The samplers in each worker effectively produces `indices[worker_id::num_workers]`\n    where `indices` is an infinite stream of indices consisting of\n    `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)\n    or `range(size) + range(size) + ...` (if shuffle is False)\n    \"\"\"\n\n    def __init__(\n        self,\n        size: int,\n        shuffle: bool = True,\n        seed: Optional[int] = 0,\n        rank=0,\n        world_size=1,\n    ):\n        \"\"\"\n        Args:\n            size (int): the total number of data of the underlying dataset to sample from\n            shuffle (bool): whether to shuffle the indices or not\n            seed (int): the initial seed of the shuffle. Must be the same\n                across all workers. If None, will use a random seed shared\n                among workers (require synchronization among all workers).\n        \"\"\"\n        self._size = size\n        assert size > 0\n        self._shuffle = shuffle\n        self._seed = int(seed)\n\n        if dist.is_available() and dist.is_initialized():\n            self._rank = dist.get_rank()\n            self._world_size = dist.get_world_size()\n        else:\n            self._rank = rank\n            self._world_size = world_size\n\n    def __iter__(self):\n        start = self._rank\n        yield from itertools.islice(\n            self._infinite_indices(), start, None, self._world_size\n        )\n\n    def _infinite_indices(self):\n        g = torch.Generator()\n        g.manual_seed(self._seed)\n        while True:\n            if self._shuffle:\n                yield from torch.randperm(self._size, generator=g)\n            else:\n                yield from torch.arange(self._size)\n\n    def __len__(self):\n        return self._size // self._world_size\n"
  },
  {
    "path": "yolox/evaluators/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .coco_evaluator import COCOEvaluator\nfrom .diffusion_mot_evaluator import DiffusionMOTEvaluator\nfrom .diffusion_mot_evaluator_kl import DiffusionMOTEvaluatorKL\n"
  },
  {
    "path": "yolox/evaluators/coco_evaluator.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport torch\n\nfrom yolox.utils import (\n    gather,\n    is_main_process,\n    postprocess,\n    diffusion_postprocess,\n    synchronize,\n    time_synchronized,\n    xyxy2xywh\n)\n\nimport contextlib\nimport io\nimport itertools\nimport json\nimport tempfile\nimport time\n\n\nclass COCOEvaluator:\n    \"\"\"\n    COCO AP Evaluation class.  All the data in the val2017 dataset are processed\n    and evaluated by COCO API.\n    \"\"\"\n\n    def __init__(\n        self, dataloader, img_size, confthre, nmsthre3d, detthre, nmsthre2d, num_classes, testdev=False\n    ):\n        \"\"\"\n        Args:\n            dataloader (Dataloader): evaluate dataloader.\n            img_size (int): image size after preprocess. images are resized\n                to squares whose shape is (img_size, img_size).\n            confthre (float): confidence threshold ranging from 0 to 1, which\n                is defined in the config file.\n            nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1.\n        \"\"\"\n        self.dataloader = dataloader\n        self.img_size = img_size\n        self.confthre = confthre\n        self.nmsthre3d = nmsthre3d\n        self.detthre=detthre\n        self.nmsthre2d=nmsthre2d\n        self.num_classes = num_classes\n        self.testdev = testdev\n\n    def evaluate(\n        self,\n        model,\n        distributed=False,\n        half=False,\n        trt_file=None,\n        decoder=None,\n        test_size=None,\n    ):\n        \"\"\"\n        COCO average precision (AP) Evaluation. Iterate inference on the test dataset\n        and the results are evaluated by COCO API.\n\n        NOTE: This function will change training mode to False, please save states if needed.\n\n        Args:\n            model : model to evaluate.\n\n        Returns:\n            ap50_95 (float) : COCO AP of IoU=50:95\n            ap50 (float) : COCO AP of IoU=50\n            summary (sr): summary info of evaluation.\n        \"\"\"\n        # TODO half to amp_test\n        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor\n        model = model.eval()\n        if half:\n            model = model.half()\n        ids = []\n        data_list = []\n        progress_bar = tqdm if is_main_process() else iter\n\n        inference_time = 0\n        nms_time = 0\n        n_samples = len(self.dataloader) - 1\n\n        if trt_file is not None:\n            from torch2trt import TRTModule\n\n            model_trt = TRTModule()\n            model_trt.load_state_dict(torch.load(trt_file))\n\n            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()\n            model(x)  \n            model = model_trt\n\n        for cur_iter, (imgs,targets,info_imgs, ids) in enumerate(\n            progress_bar(self.dataloader)\n        ):\n            with torch.no_grad():\n                imgs = imgs.type(tensor_type)\n                targets=targets.type(tensor_type)\n\n                # skip the the last iters since batchsize might be not enough for batch inference\n                is_time_record = cur_iter < len(self.dataloader) - 1\n                if is_time_record:\n                    start = time.time()\n                bboxes=targets[...,1:5]\n                outputs = model((imgs,None),(None,None))\n                if decoder is not None:\n                    outputs = 
decoder(outputs, dtype=outputs.type())\n\n                if is_time_record:\n                    infer_end = time_synchronized()\n                    inference_time += infer_end - start\n\n                pre_outputs,cur_outputs=torch.split(outputs[0],len(outputs[0])//2)\n                outputs = diffusion_postprocess(\n                    pre_outputs,cur_outputs,outputs[1],conf_thre=self.confthre,\n                    det_thre=self.detthre,nms_thre3d=self.nmsthre3d,nms_thre2d=self.nmsthre2d\n                )\n                if is_time_record:\n                    nms_end = time_synchronized()\n                    nms_time += nms_end - infer_end\n\n            data_list.extend(self.convert_to_coco_format(outputs, info_imgs, ids))\n\n        statistics = torch.cuda.FloatTensor([inference_time, nms_time, n_samples])\n        if distributed:\n            data_list = gather(data_list, dst=0)\n            data_list = list(itertools.chain(*data_list))\n            torch.distributed.reduce(statistics, dst=0)\n\n        eval_results = self.evaluate_prediction(data_list, statistics)\n        synchronize()\n        return eval_results\n\n    def convert_to_coco_format(self, outputs, info_imgs, ids):\n        data_list = []\n        for (output, img_h, img_w, img_id) in zip(\n            outputs, info_imgs[0], info_imgs[1], ids\n        ):\n            if output is None:\n                continue\n            output = output.cpu()\n\n            bboxes = output[:, 0:4]\n\n            # preprocessing: resize\n            scale = min(\n                self.img_size[0] / float(img_h), self.img_size[1] / float(img_w)\n            )\n            bboxes /= scale\n            bboxes = xyxy2xywh(bboxes)\n\n            cls = output[:, 6]\n            scores = output[:, 4] * output[:, 5]\n            for ind in range(bboxes.shape[0]):\n                label = self.dataloader.dataset.class_ids[int(cls[ind])]\n                pred_data = {\n                    \"image_id\": int(img_id),\n                    \"category_id\": label,\n                    \"bbox\": bboxes[ind].numpy().tolist(),\n                    \"score\": scores[ind].numpy().item(),\n                    \"segmentation\": [],\n                }  # COCO json format\n                data_list.append(pred_data)\n        return data_list\n\n    def evaluate_prediction(self, data_dict, statistics):\n        if not is_main_process():\n            return 0, 0, None\n\n        logger.info(\"Evaluate in main process...\")\n\n        annType = [\"segm\", \"bbox\", \"keypoints\"]\n\n        inference_time = statistics[0].item()\n        nms_time = statistics[1].item()\n        n_samples = statistics[2].item()\n\n        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)\n        a_nms_time = 1000 * nms_time / (n_samples * self.dataloader.batch_size)\n\n        time_info = \", \".join(\n            [\n                \"Average {} time: {:.2f} ms\".format(k, v)\n                for k, v in zip(\n                    [\"forward\", \"NMS\", \"inference\"],\n                    [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],\n                )\n            ]\n        )\n\n        info = time_info + \"\\n\"\n\n        # Evaluate the Dt (detection) json comparing with the ground truth\n        if len(data_dict) > 0:\n            cocoGt = self.dataloader.dataset.coco\n            # TODO: since pycocotools can't process dict in py36, write data to json file.\n            if self.testdev:\n                json.dump(data_dict, 
open(\"./yolox_testdev_2017.json\", \"w\"))\n                cocoDt = cocoGt.loadRes(\"./yolox_testdev_2017.json\")\n            else:\n                _, tmp = tempfile.mkstemp()\n                json.dump(data_dict, open(tmp, \"w\"))\n                cocoDt = cocoGt.loadRes(tmp)\n            '''\n            try:\n                from yolox.layers import COCOeval_opt as COCOeval\n            except ImportError:\n                from pycocotools import cocoeval as COCOeval\n                logger.warning(\"Use standard COCOeval.\")\n            '''\n            #from pycocotools.cocoeval import COCOeval\n            from yolox.layers import COCOeval_opt as COCOeval\n            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            redirect_string = io.StringIO()\n            with contextlib.redirect_stdout(redirect_string):\n                cocoEval.summarize()\n            info += redirect_string.getvalue()\n            return cocoEval.stats[0], cocoEval.stats[1], info\n        else:\n            return 0, 0, info\n"
  },
  {
    "path": "yolox/evaluators/diffusion_mot_evaluator.py",
    "content": "from collections import defaultdict\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport torch\n\nfrom yolox.utils import (\n    gather,\n    is_main_process,\n    postprocess,\n    synchronize,\n    time_synchronized,\n    xyxy2xywh\n)\nfrom yolox.tracker.diffusion_tracker import DiffusionTracker\nfrom yolox.models import  YOLOXHead\n\nimport contextlib\nimport io\nimport os\nimport itertools\nimport json\nimport tempfile\nimport time\nimport numpy as np\n\ndef write_results(filename, results):\n    save_format = '{frame},{id},{x1:.1f},{y1:.1f},{w:.1f},{h:.1f},{s:.2f},-1,-1,-1\\n'\n    with open(filename, 'w') as f:\n        for frame_id, tlwhs, track_ids, scores in results:\n            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):\n                if track_id < 0:\n                    continue\n                x1, y1, w, h = tlwh\n                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, w=w, h=h, s=score)\n                f.write(line)\n    logger.info('save results to {}'.format(filename))\n\n\ndef write_results_no_score(filename, results):\n    save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\\n'\n    with open(filename, 'w') as f:\n        for frame_id, tlwhs, track_ids in results:\n            for tlwh, track_id in zip(tlwhs, track_ids):\n                if track_id < 0:\n                    continue\n                x1, y1, w, h = tlwh\n                line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1))\n                f.write(line)\n    logger.info('save results to {}'.format(filename))\n\n\nclass DiffusionMOTEvaluator:\n    \"\"\"\n    COCO AP Evaluation class.  All the data in the val2017 dataset are processed\n    and evaluated by COCO API.\n    \"\"\"\n\n    def __init__(\n        self, args, dataloader, img_size, confthre, nmsthre3d, detthre,nmsthre2d,interval, num_classes):\n        \"\"\"\n        Args:\n            dataloader (Dataloader): evaluate dataloader.\n            img_size (int): image size after preprocess. images are resized\n                to squares whose shape is (img_size, img_size).\n            confthre (float): confidence threshold ranging from 0 to 1, which\n                is defined in the config file.\n            nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1.\n        \"\"\"\n        self.dataloader = dataloader\n        self.img_size = img_size\n        self.confthre = confthre\n        self.nmsthre3d = nmsthre3d\n        self.detthre=detthre\n        self.nmsthre2d=nmsthre2d\n        self.num_classes = num_classes\n        self.association_interval=interval\n        self.args = args\n\n    def evaluate(\n        self,\n        model,\n        distributed=False,\n        half=False,\n        trt_file=None,\n        decoder=None,\n        test_size=None,\n        result_folder=None\n    ):\n        \"\"\"\n        COCO average precision (AP) Evaluation. 
Iterate inference on the test dataset\n        and the results are evaluated by COCO API.\n\n        NOTE: This function will change training mode to False, please save states if needed.\n\n        Args:\n            model : model to evaluate.\n\n        Returns:\n            ap50_95 (float) : COCO AP of IoU=50:95\n            ap50 (float) : COCO AP of IoU=50\n            summary (sr): summary info of evaluation.\n        \"\"\"\n        # TODO half to amp_test\n        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor\n        model = model.eval()\n        if half:\n            model = model.half()\n        ids = []\n        data_list = []\n        results = []\n        seq_ids=[]\n        seq_info_imgs=[]\n        seq_frame_ids=[]\n        video_names = defaultdict()\n        ori_detthre=self.detthre\n        ori_confthre=self.confthre\n        progress_bar = tqdm if is_main_process() else iter\n\n        track_time = 0\n        n_samples = len(self.dataloader) - 1\n\n        if trt_file is not None:\n            from torch2trt import TRTModule\n\n            model_trt = TRTModule()\n            model_trt.load_state_dict(torch.load(trt_file))\n\n            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()\n            model(x)\n            model = model_trt\n            \n        tracker = DiffusionTracker(model,tensor_type)\n        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(\n            progress_bar(self.dataloader)\n        ):\n            with torch.no_grad():\n                # init tracker\n                frame_id = info_imgs[2].item()\n                video_id = info_imgs[3].item()\n                img_file_name = info_imgs[4]\n                video_name = img_file_name[0].split('/')[0]\n\n                if video_name not in video_names:\n                    video_names[video_id] = video_name\n                \n                self.detthre=ori_detthre\n                self.confthre=ori_confthre\n                # if video_name == 'MOT17-02-FRCNN' or video_name == 'MOT17-01-FRCNN':\n                # #self.association_interval = 1\n                #     self.confthre = 0.6\n                # elif video_name == 'MOT17-04-FRCNN' or video_name=='MOT17-03-FRCNN':\n                #     self.detthre = 0.5\n                #     #self.association_interval = 1\n                #     self.confthre = 0.4\n                # elif video_name == 'MOT17-05-FRCNN' or video_name == 'MOT17-06-FRCNN':\n                #     #self.association_interval = 1\n                #     self.confthre = 0.4\n                # elif video_name == 'MOT17-09-FRCNN' or video_name == 'MOT17-07-FRCNN':\n                #     self.confthre = 0.3\n                #     self.detthre = 0.5\n                #     self.nmsthre3d = 0.6\n                # elif video_name == 'MOT17-10-FRCNN' or video_name == 'MOT17-8-FRCNN':\n                #     self.confthre = 0.4\n                #     self.nmsthre3d = 0.5\n                # elif video_name == 'MOT17-11-FRCNN' or video_name == 'MOT17-12-FRCNN':\n                #     self.confthre = 0.5\n                # elif video_name == 'MOT17-13-FRCNN' or video_name == 'MOT17-14-FRCNN':\n                #     self.confthre = 0.5\n                #     self.detthre = 0.5\n                # if video_name ==\"MOT20-06\" or video_name==\"MOT20-08\":\n                #     self.detthre=0.3\n\n                # if video_name!=\"MOT20-01\" and video_name!=\"MOT20-02\":\n                #     continue\n\n                if frame_id == 1:\n            
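        # a new video starts here: flush the previous sequence's results to disk\n                    # and re-create the tracker with the current thresholds\n            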
        if len(seq_ids) != 0:\n                        outputs=tracker.get_results()\n                        track_time+=tracker.total_time\n                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))\n                        for output,info_img,id,cur_frame_id in zip(outputs,seq_info_imgs,seq_ids,seq_frame_ids):\n                            output_results,scale = self.convert_to_coco_format(output, info_img, id)\n                            data_list.extend(output_results)\n\n                            # run tracking\n                            online_tlwhs = []\n                            online_ids = []\n                            online_scores = []\n                            for tid,obj in zip(*output):\n                                xyxy = obj[:4]/scale\n                                tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]]\n                                vertical = tlwh[2] / tlwh[3] > 1.6\n                                if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:\n                                    online_tlwhs.append(tlwh)\n                                    online_ids.append(tid)\n                                    online_scores.append(obj[4])\n                                # save results\n                            results.append((cur_frame_id, online_tlwhs, online_ids, online_scores))\n                        write_results(result_filename, results)\n                        results = []\n                        seq_ids=[]\n                        seq_info_imgs=[]\n                        seq_frame_ids=[]\n                    tracker = DiffusionTracker(model,tensor_type,self.confthre,self.detthre,self.nmsthre3d,self.nmsthre2d,self.association_interval)\n\n                imgs = imgs.type(tensor_type)\n\n                # skip the last iters since the batch size might not be enough for batch inference\n\n                tracker.update(imgs)\n            \n\n            seq_ids.append(ids)\n            seq_info_imgs.append(info_imgs)\n            seq_frame_ids.append(frame_id)\n            \n            if cur_iter == len(self.dataloader) - 1:\n                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))\n                outputs=tracker.get_results()\n                track_time+=tracker.total_time\n                for output,info_img,id,cur_frame_id in zip(outputs,seq_info_imgs,seq_ids,seq_frame_ids):\n                        output_results,scale = self.convert_to_coco_format(output, info_img, id)\n                        data_list.extend(output_results)\n                        # run tracking\n                        online_tlwhs = []\n                        online_ids = []\n                        online_scores = []\n                        for tid,obj in zip(*output):\n                            xyxy = obj[:4]/scale\n                            tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]]\n                            vertical = tlwh[2] / tlwh[3] > 1.6\n                            if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:\n                                online_tlwhs.append(tlwh)\n                                online_ids.append(tid)\n                                online_scores.append(obj[4])\n                            # save results\n                        results.append((cur_frame_id, online_tlwhs, online_ids, online_scores))\n                write_results(result_filename, results)\n\n        
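# fps here counts 2 * n_samples frames against the accumulated association time\n        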
print(\"diffusion track fps : {}\".format(n_samples*2/track_time))\n        \n        statistics = torch.cuda.FloatTensor([0, track_time, n_samples])\n        if distributed:\n            data_list = gather(data_list, dst=0)\n            data_list = list(itertools.chain(*data_list))\n            torch.distributed.reduce(statistics, dst=0)\n\n        eval_results = self.evaluate_prediction(data_list, statistics)\n        synchronize()\n        return eval_results\n\n    def convert_to_coco_format(self, output, info_imgs, ids):\n        data_list = []\n        scale = min(\n                self.img_size[0] / float(info_imgs[0]), self.img_size[1] / float(info_imgs[1])\n            )\n        bboxes = []\n        clses = []\n        scores = []\n\n        if len(output[1])>0:\n            for t in output[1]:\n                bboxes.append(t[:4])\n                clses.append(0)\n                scores.append(t[4])\n            bboxes=np.array(bboxes)\n            bboxes /= scale\n            bboxes = xyxy2xywh(bboxes)\n            \n        for ind in range(len(bboxes)):\n            label = self.dataloader.dataset.class_ids[int(clses[ind])]\n            pred_data = {\n                \"image_id\": int(ids[0]),\n                \"category_id\": label,\n                \"bbox\": bboxes[ind].tolist(),\n                \"score\": float(scores[ind]),\n                \"segmentation\": [],\n            }  # COCO json format\n            data_list.append(pred_data)\n        return data_list,scale\n\n    def evaluate_prediction(self, data_dict, statistics):\n        if not is_main_process():\n            return 0, 0, None\n\n        logger.info(\"Evaluate in main process...\")\n\n        annType = [\"segm\", \"bbox\", \"keypoints\"]\n\n        inference_time = statistics[0].item()\n        track_time = statistics[1].item()\n        n_samples = statistics[2].item()\n\n        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)\n        a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size)\n\n        time_info = \", \".join(\n            [\n                \"Average {} time: {:.2f} ms\".format(k, v)\n                for k, v in zip(\n                    [\"forward\", \"track\", \"inference\"],\n                    [a_infer_time, a_track_time, (a_infer_time + a_track_time)],\n                )\n            ]\n        )\n\n        info = time_info + \"\\n\"\n\n        # Evaluate the Dt (detection) json comparing with the ground truth\n        if len(data_dict) > 0:\n            cocoGt = self.dataloader.dataset.coco\n            # TODO: since pycocotools can't process dict in py36, write data to json file.\n            _, tmp = tempfile.mkstemp()\n            json.dump(data_dict, open(tmp, \"w\"))\n            cocoDt = cocoGt.loadRes(tmp)\n            '''\n            try:\n                from yolox.layers import COCOeval_opt as COCOeval\n            except ImportError:\n                from pycocotools import cocoeval as COCOeval\n                logger.warning(\"Use standard COCOeval.\")\n            '''\n            #from pycocotools.cocoeval import COCOeval\n            from yolox.layers import COCOeval_opt as COCOeval\n            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            redirect_string = io.StringIO()\n            with contextlib.redirect_stdout(redirect_string):\n                cocoEval.summarize()\n            info += redirect_string.getvalue()\n            
return cocoEval.stats[0], cocoEval.stats[1], info\n        else:\n            return 0, 0, info\n"
  },
  {
    "path": "yolox/evaluators/diffusion_mot_evaluator_kl.py",
    "content": "from collections import defaultdict\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport torch\n\nfrom yolox.utils import (\n    gather,\n    is_main_process,\n    postprocess,\n    synchronize,\n    time_synchronized,\n    xyxy2xywh\n)\nfrom yolox.tracker.diffusion_tracker_kl import DiffusionTracker\nfrom yolox.models import  YOLOXHead\n\nimport contextlib\nimport io\nimport os\nimport itertools\nimport json\nimport tempfile\nimport time\nimport numpy as np\nimport cv2\nfrom yolox.utils.visualize import plot_tracking\n\ndef write_results(filename, results):\n    save_format = '{frame},{id},{x1:.1f},{y1:.1f},{w:.1f},{h:.1f},{s:.2f},-1,-1,-1\\n'\n    with open(filename, 'w') as f:\n        for frame_id, tlwhs, track_ids, scores in results:\n            for tlwh, track_id, score in zip(tlwhs, track_ids, scores):\n                if track_id < 0:\n                    continue\n                x1, y1, w, h = tlwh\n                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, w=w, h=h, s=score)\n                f.write(line)\n    logger.info('save results to {}'.format(filename))\n\n\ndef write_results_no_score(filename, results):\n    save_format = '{frame},{id},{x1},{y1},{w},{h},-1,-1,-1,-1\\n'\n    with open(filename, 'w') as f:\n        for frame_id, tlwhs, track_ids in results:\n            for tlwh, track_id in zip(tlwhs, track_ids):\n                if track_id < 0:\n                    continue\n                x1, y1, w, h = tlwh\n                line = save_format.format(frame=frame_id, id=track_id, x1=round(x1, 1), y1=round(y1, 1), w=round(w, 1), h=round(h, 1))\n                f.write(line)\n    logger.info('save results to {}'.format(filename))\n\n\nclass DiffusionMOTEvaluatorKL:\n    \"\"\"\n    COCO AP Evaluation class.  All the data in the val2017 dataset are processed\n    and evaluated by COCO API.\n    \"\"\"\n\n    def __init__(\n        self, args, dataloader, img_size, confthre, nmsthre3d, detthre,nmsthre2d,interval, num_classes):\n        \"\"\"\n        Args:\n            dataloader (Dataloader): evaluate dataloader.\n            img_size (int): image size after preprocess. images are resized\n                to squares whose shape is (img_size, img_size).\n            confthre (float): confidence threshold ranging from 0 to 1, which\n                is defined in the config file.\n            nmsthre (float): IoU threshold of non-max supression ranging from 0 to 1.\n        \"\"\"\n        self.dataloader = dataloader\n        self.img_size = img_size\n        self.confthre = confthre\n        self.nmsthre3d = nmsthre3d\n        self.detthre=detthre\n        self.nmsthre2d=nmsthre2d\n        self.num_classes = num_classes\n        self.association_interval=interval\n        self.args = args\n\n    def evaluate(\n        self,\n        model,\n        distributed=False,\n        half=False,\n        trt_file=None,\n        decoder=None,\n        test_size=None,\n        result_folder=None\n    ):\n        \"\"\"\n        COCO average precision (AP) Evaluation. 
Iterate inference on the test dataset\n        and the results are evaluated by COCO API.\n\n        NOTE: This function will change training mode to False, please save states if needed.\n\n        Args:\n            model : model to evaluate.\n\n        Returns:\n            ap50_95 (float) : COCO AP of IoU=50:95\n            ap50 (float) : COCO AP of IoU=50\n            summary (sr): summary info of evaluation.\n        \"\"\"\n        # TODO half to amp_test\n        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor\n        model = model.eval()\n        if half:\n            model = model.half()\n        ids = []\n        data_list = []\n        results = []\n        # seq_ids=[]\n        # seq_info_imgs=[]\n        # seq_frame_ids=[]\n        video_names = defaultdict()\n        ori_detthre=self.detthre\n        ori_confthre=self.confthre\n        progress_bar = tqdm if is_main_process() else iter\n\n        track_time = 0\n        n_samples = len(self.dataloader) - 1\n\n        if trt_file is not None:\n            from torch2trt import TRTModule\n\n            model_trt = TRTModule()\n            model_trt.load_state_dict(torch.load(trt_file))\n\n            x = torch.ones(1, 3, test_size[0], test_size[1]).cuda()\n            model(x)\n            model = model_trt\n            \n        tracker = DiffusionTracker(model,tensor_type)\n        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(\n            progress_bar(self.dataloader)\n        ):\n            with torch.no_grad():\n                # init tracker\n                frame_id = info_imgs[2].item()\n                video_id = info_imgs[3].item()\n                img_file_name = info_imgs[4]\n                video_name = img_file_name[0].split('/')[0]\n\n                # if not (\"MOT17-12\" in video_name or \"MOT17-14\" in video_name):\n                #     continue\n\n                if video_name not in video_names:\n                    video_names[video_id] = video_name\n                \n                self.detthre=ori_detthre\n                self.confthre=ori_confthre\n                # if video_name ==\"MOT20-06\" or video_name==\"MOT20-08\":\n                #     self.detthre=0.4\n\n                # if video_name!=\"dancetrack0007\":\n                #     continue\n\n                if frame_id == 1:\n                    # text_path=\"DiffusionTrack_outputs/yolox_x_diffusion_track_mot20/track_results_mot20_test/{}.txt\".format(video_name)\n                    # scale = min(\n                    #     896 / float(info_imgs[0]), 1600 / float(info_imgs[1])\n                    #     )\n                    # detections=defaultdict(list)\n                    # with open(text_path,'r') as f:\n                    #     for line in f.readlines():\n                    #         data=line.strip().split(',')\n                    #         detections[int(data[0])].append([float(data[2])*scale,float(data[3])*scale,(float(data[4])+float(data[2]))*scale,(float(data[5])+float(data[3]))*scale,1,float(data[6])])\n                    detections=None\n                    tracker = DiffusionTracker(model,tensor_type,self.confthre,self.detthre,self.nmsthre3d,self.nmsthre2d,self.association_interval,detections)\n                    if len(results) != 0:\n                        result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id - 1]))\n                        write_results(result_filename, results)\n                        results = []\n\n                # skip the the last 
                imgs = imgs.type(tensor_type)\n                output, association_time = tracker.update(imgs)\n                track_time += association_time\n\n                output_results, scale = self.convert_to_coco_format(output, info_imgs, ids)\n                data_list.extend(output_results)\n\n                # run tracking\n                online_tlwhs = []\n                online_ids = []\n                online_scores = []\n                for t in output:\n                    tlwh = t._tlwh / scale\n                    # tlwh = [xyxy[0],xyxy[1],xyxy[2]-xyxy[0],xyxy[3]-xyxy[1]]\n                    vertical = tlwh[2] / tlwh[3] > 1.6\n                    if tlwh[2] * tlwh[3] > self.args.min_box_area and not vertical:\n                        online_tlwhs.append(tlwh)\n                        online_ids.append(t.track_id)\n                        online_scores.append(t.score)\n                    # save results\n                # image_path=os.path.join(\"DiffusionTrack/datasets/dancetrack/train\",info_imgs[4][0])\n                # raw_image= cv2.imread(image_path)\n                # online_im = plot_tracking(\n                #     raw_image, online_tlwhs, online_ids, frame_id=frame_id, fps=30\n                # )\n                # os.makedirs(\"DiffusionTrack/vis_fold/{}\".format(video_name),exist_ok=True)\n                # cv2.imwrite(\"DiffusionTrack/vis_fold/{}/{:0>5d}.jpg\".format(video_name,frame_id),online_im)\n\n                results.append((frame_id, online_tlwhs, online_ids, online_scores))\n\n            if cur_iter == len(self.dataloader) - 1:\n                result_filename = os.path.join(result_folder, '{}.txt'.format(video_names[video_id]))\n                write_results(result_filename, results)\n\n        # each tracker update covers two frames, hence the factor of 2\n        print(\"diffusion track fps : {}\".format(2 * n_samples / track_time))\n\n        statistics = torch.cuda.FloatTensor([0, track_time, n_samples])\n        if distributed:\n            data_list = gather(data_list, dst=0)\n            data_list = list(itertools.chain(*data_list))\n            torch.distributed.reduce(statistics, dst=0)\n\n        eval_results = self.evaluate_prediction(data_list, statistics)\n        synchronize()\n        return eval_results\n\n    def convert_to_coco_format(self, output, info_imgs, ids):\n        data_list = []\n        scale = min(\n                self.img_size[0] / float(info_imgs[0]), self.img_size[1] / float(info_imgs[1])\n            )\n        bboxes = []\n        clses = []\n        scores = []\n\n        if len(output) > 0:\n            for t in output:\n                bboxes.append(t._tlwh)\n                clses.append(0)\n                scores.append(t.score)\n            bboxes = np.array(bboxes)\n            bboxes /= scale\n            # bboxes = xyxy2xywh(bboxes)\n\n        for ind in range(len(bboxes)):\n            label = self.dataloader.dataset.class_ids[int(clses[ind])]\n            pred_data = {\n                \"image_id\": int(ids[0]),\n                \"category_id\": label,\n                \"bbox\": bboxes[ind].tolist(),\n                \"score\": float(scores[ind]),\n                \"segmentation\": [],\n            }  # COCO json format\n            data_list.append(pred_data)\n        return data_list, scale\n\n    def evaluate_prediction(self, data_dict, statistics):\n        if not is_main_process():\n            return 0, 0, None\n\n        logger.info(\"Evaluate in main process...\")\n\n        annType = [\"segm\", \"bbox\", \"keypoints\"]
\n\n        inference_time = statistics[0].item()\n        track_time = statistics[1].item()\n        n_samples = statistics[2].item()\n\n        a_infer_time = 1000 * inference_time / (n_samples * self.dataloader.batch_size)\n        a_track_time = 1000 * track_time / (n_samples * self.dataloader.batch_size)\n\n        time_info = \", \".join(\n            [\n                \"Average {} time: {:.2f} ms\".format(k, v)\n                for k, v in zip(\n                    [\"forward\", \"track\", \"inference\"],\n                    [a_infer_time, a_track_time, (a_infer_time + a_track_time)],\n                )\n            ]\n        )\n\n        info = time_info + \"\\n\"\n\n        # Evaluate the Dt (detection) json by comparing it with the ground truth\n        if len(data_dict) > 0:\n            cocoGt = self.dataloader.dataset.coco\n            # TODO: since pycocotools can't process dict in py36, write data to json file.\n            _, tmp = tempfile.mkstemp()\n            json.dump(data_dict, open(tmp, \"w\"))\n            cocoDt = cocoGt.loadRes(tmp)\n            '''\n            try:\n                from yolox.layers import COCOeval_opt as COCOeval\n            except ImportError:\n                from pycocotools.cocoeval import COCOeval\n                logger.warning(\"Use standard COCOeval.\")\n            '''\n            #from pycocotools.cocoeval import COCOeval\n            from yolox.layers import COCOeval_opt as COCOeval\n            cocoEval = COCOeval(cocoGt, cocoDt, annType[1])\n            cocoEval.evaluate()\n            cocoEval.accumulate()\n            redirect_string = io.StringIO()\n            with contextlib.redirect_stdout(redirect_string):\n                cocoEval.summarize()\n            info += redirect_string.getvalue()\n            return cocoEval.stats[0], cocoEval.stats[1], info\n        else:\n            return 0, 0, info\n
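\n\n# A line written by write_results() above looks like this in MOTChallenge\n# format (frame,id,x1,y1,w,h,score,-1,-1,-1); the values are illustrative:\n#   1,1,912.3,484.7,97.2,109.1,0.94,-1,-1,-1\n"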
  },
  {
    "path": "yolox/evaluators/evaluation.py",
    "content": "import os\nimport numpy as np\nimport copy\nimport motmetrics as mm\nmm.lap.default_solver = 'lap'\n\n\nclass Evaluator(object):\n\n    def __init__(self, data_root, seq_name, data_type):\n        self.data_root = data_root\n        self.seq_name = seq_name\n        self.data_type = data_type\n\n        self.load_annotations()\n        self.reset_accumulator()\n\n    def load_annotations(self):\n        assert self.data_type == 'mot'\n\n        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')\n        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)\n        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)\n\n    def reset_accumulator(self):\n        self.acc = mm.MOTAccumulator(auto_id=True)\n\n    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):\n        # results\n        trk_tlwhs = np.copy(trk_tlwhs)\n        trk_ids = np.copy(trk_ids)\n\n        # gts\n        gt_objs = self.gt_frame_dict.get(frame_id, [])\n        gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]\n\n        # ignore boxes\n        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])\n        ignore_tlwhs = unzip_objs(ignore_objs)[0]\n\n        # remove ignored results\n        keep = np.ones(len(trk_tlwhs), dtype=bool)\n        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)\n        if len(iou_distance) > 0:\n            match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n            match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n            match_ious = iou_distance[match_is, match_js]\n\n            match_js = np.asarray(match_js, dtype=int)\n            match_js = match_js[np.logical_not(np.isnan(match_ious))]\n            keep[match_js] = False\n            trk_tlwhs = trk_tlwhs[keep]\n            trk_ids = trk_ids[keep]\n        #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n        #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n        #match_ious = iou_distance[match_is, match_js]\n\n        #match_js = np.asarray(match_js, dtype=int)\n        #match_js = match_js[np.logical_not(np.isnan(match_ious))]\n        #keep[match_js] = False\n        #trk_tlwhs = trk_tlwhs[keep]\n        #trk_ids = trk_ids[keep]\n\n        # get distance matrix\n        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)\n\n        # acc\n        self.acc.update(gt_ids, trk_ids, iou_distance)\n\n        if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):\n            events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics\n        else:\n            events = None\n        return events\n\n    def eval_file(self, filename):\n        self.reset_accumulator()\n\n        result_frame_dict = read_results(filename, self.data_type, is_gt=False)\n        #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))\n        frames = sorted(list(set(result_frame_dict.keys())))\n        for frame_id in frames:\n            trk_objs = result_frame_dict.get(frame_id, [])\n            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]\n            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)\n\n        return self.acc\n\n    @staticmethod\n    def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):\n        names 
        if metrics is None:\n            metrics = mm.metrics.motchallenge_metrics\n        metrics = copy.deepcopy(metrics)\n\n        mh = mm.metrics.create()\n        summary = mh.compute_many(\n            accs,\n            metrics=metrics,\n            names=names,\n            generate_overall=True\n        )\n\n        return summary\n\n    @staticmethod\n    def save_summary(summary, filename):\n        import pandas as pd\n        writer = pd.ExcelWriter(filename)\n        summary.to_excel(writer)\n        writer.save()\n\n\ndef read_results(filename, data_type: str, is_gt=False, is_ignore=False):\n    if data_type in ('mot', 'lab'):\n        read_fun = read_mot_results\n    else:\n        raise ValueError('Unknown data type: {}'.format(data_type))\n\n    return read_fun(filename, is_gt, is_ignore)\n\n\n\"\"\"\nlabels={'ped', ...\t\t\t% 1\n'person_on_vhcl', ...\t% 2\n'car', ...\t\t\t\t% 3\n'bicycle', ...\t\t\t% 4\n'mbike', ...\t\t\t% 5\n'non_mot_vhcl', ...\t\t% 6\n'static_person', ...\t% 7\n'distractor', ...\t\t% 8\n'occluder', ...\t\t\t% 9\n'occluder_on_grnd', ...\t\t%10\n'occluder_full', ...\t\t% 11\n'reflection', ...\t\t% 12\n'crowd' ...\t\t\t% 13\n};\n\"\"\"\n\n\ndef read_mot_results(filename, is_gt, is_ignore):\n    valid_labels = {1}\n    ignore_labels = {2, 7, 8, 12}\n    results_dict = dict()\n    if os.path.isfile(filename):\n        with open(filename, 'r') as f:\n            for line in f.readlines():\n                linelist = line.split(',')\n                if len(linelist) < 7:\n                    continue\n                fid = int(linelist[0])\n                if fid < 1:\n                    continue\n                results_dict.setdefault(fid, list())\n\n                box_size = float(linelist[4]) * float(linelist[5])\n\n                if is_gt:\n                    if 'MOT16-' in filename or 'MOT17-' in filename:\n                        label = int(float(linelist[7]))\n                        mark = int(float(linelist[6]))\n                        if mark == 0 or label not in valid_labels:\n                            continue\n                    score = 1\n                elif is_ignore:\n                    if 'MOT16-' in filename or 'MOT17-' in filename:\n                        label = int(float(linelist[7]))\n                        vis_ratio = float(linelist[8])\n                        if label not in ignore_labels and vis_ratio >= 0:\n                            continue\n                    else:\n                        continue\n                    score = 1\n                else:\n                    score = float(linelist[6])\n\n                #if box_size > 7000:\n                #if box_size <= 7000 or box_size >= 15000:\n                #if box_size < 15000:\n                    #continue\n\n                tlwh = tuple(map(float, linelist[2:6]))\n                target_id = int(linelist[1])\n\n                results_dict[fid].append((tlwh, target_id, score))\n\n    return results_dict\n\n\ndef unzip_objs(objs):\n    if len(objs) > 0:\n        tlwhs, ids, scores = zip(*objs)\n    else:\n        tlwhs, ids, scores = [], [], []\n    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)\n\n    return tlwhs, ids, scores
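\n\n\n# Usage sketch (illustrative; assumes the standard MOTChallenge layout\n# <data_root>/<seq_name>/gt/gt.txt and one tracker result file per sequence):\n#   evaluator = Evaluator('datasets/mot/train', 'MOT17-02', 'mot')\n#   acc = evaluator.eval_file('track_results/MOT17-02.txt')\n#   print(Evaluator.get_summary([acc], ['MOT17-02']))\n"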
  },
  {
    "path": "yolox/exp/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .base_exp import BaseExp\nfrom .build import get_exp\nfrom .yolox_base import Exp\n"
  },
  {
    "path": "yolox/exp/base_exp.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nfrom torch.nn import Module\n\nfrom yolox.utils import LRScheduler\n\nimport ast\nimport pprint\nfrom abc import ABCMeta, abstractmethod\nfrom tabulate import tabulate\nfrom typing import Dict\n\n\nclass BaseExp(metaclass=ABCMeta):\n    \"\"\"Basic class for any experiment.\"\"\"\n\n    def __init__(self):\n        self.seed = None\n        self.output_dir = \"./DiffusionTrack_outputs\"\n        self.print_interval = 100\n        self.eval_interval = 10\n\n    @abstractmethod\n    def get_model(self) -> Module:\n        pass\n\n    @abstractmethod\n    def get_data_loader(\n        self, batch_size: int, is_distributed: bool\n    ) -> Dict[str, torch.utils.data.DataLoader]:\n        pass\n\n    @abstractmethod\n    def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:\n        pass\n\n    @abstractmethod\n    def get_lr_scheduler(\n        self, lr: float, iters_per_epoch: int, **kwargs\n    ) -> LRScheduler:\n        pass\n\n    @abstractmethod\n    def get_evaluator(self):\n        pass\n\n    @abstractmethod\n    def eval(self, model, evaluator, weights):\n        pass\n\n    def __repr__(self):\n        table_header = [\"keys\", \"values\"]\n        exp_table = [\n            (str(k), pprint.pformat(v))\n            for k, v in vars(self).items()\n            if not k.startswith(\"_\")\n        ]\n        return tabulate(exp_table, headers=table_header, tablefmt=\"fancy_grid\")\n\n    def merge(self, cfg_list):\n        assert len(cfg_list) % 2 == 0\n        for k, v in zip(cfg_list[0::2], cfg_list[1::2]):\n            # only update value with same key\n            if hasattr(self, k):\n                src_value = getattr(self, k)\n                src_type = type(src_value)\n                if src_value is not None and src_type != type(v):\n                    try:\n                        v = src_type(v)\n                    except Exception:\n                        v = ast.literal_eval(v)\n                setattr(self, k, v)\n"
  },
  {
    "path": "yolox/exp/build.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport importlib\nimport os\nimport sys\n\n\ndef get_exp_by_file(exp_file):\n    try:\n        sys.path.append(os.path.dirname(exp_file))\n        current_exp = importlib.import_module(os.path.basename(exp_file).split(\".\")[0])\n        exp = current_exp.Exp()\n    except Exception:\n        raise ImportError(\"{} doesn't contains class named 'Exp'\".format(exp_file))\n    return exp\n\n\ndef get_exp_by_name(exp_name):\n    import yolox\n\n    yolox_path = os.path.dirname(os.path.dirname(yolox.__file__))\n    filedict = {\n        \"yolox-s\": \"yolox_s.py\",\n        \"yolox-m\": \"yolox_m.py\",\n        \"yolox-l\": \"yolox_l.py\",\n        \"yolox-x\": \"yolox_x.py\",\n        \"yolox-tiny\": \"yolox_tiny.py\",\n        \"yolox-nano\": \"nano.py\",\n        \"yolov3\": \"yolov3.py\",\n    }\n    filename = filedict[exp_name]\n    exp_path = os.path.join(yolox_path, \"exps\", \"default\", filename)\n    return get_exp_by_file(exp_path)\n\n\ndef get_exp(exp_file, exp_name):\n    \"\"\"\n    get Exp object by file or name. If exp_file and exp_name\n    are both provided, get Exp by exp_file.\n\n    Args:\n        exp_file (str): file path of experiment.\n        exp_name (str): name of experiment. \"yolo-s\",\n    \"\"\"\n    assert (\n        exp_file is not None or exp_name is not None\n    ), \"plz provide exp file or exp name.\"\n    if exp_file is not None:\n        return get_exp_by_file(exp_file)\n    else:\n        return get_exp_by_name(exp_name)\n"
  },
  {
    "path": "yolox/exp/yolox_base.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\n\nimport os\nimport random\n\nfrom .base_exp import BaseExp\n\n\nclass Exp(BaseExp):\n    def __init__(self):\n        super().__init__()\n\n        # ---------------- model config ---------------- #\n        self.num_classes = 80\n        self.depth = 1.00\n        self.width = 1.00\n\n        # ---------------- dataloader config ---------------- #\n        # set worker to 4 for shorter dataloader init time\n        self.data_num_workers = 4\n        self.input_size = (640, 640)\n        self.random_size = (14, 26)\n        self.train_ann = \"instances_train2017.json\"\n        self.val_ann = \"instances_val2017.json\"\n\n        # --------------- transform config ----------------- #\n        self.degrees = 10.0\n        self.translate = 0.1\n        self.scale = (0.1, 2)\n        self.mscale = (0.8, 1.6)\n        self.shear = 2.0\n        self.perspective = 0.0\n        self.enable_mixup = True\n\n        # --------------  training config --------------------- #\n        self.warmup_epochs = 5\n        self.max_epoch = 300\n        self.warmup_lr = 0\n        self.basic_lr_per_img = 0.01 / 64.0\n        self.scheduler = \"yoloxwarmcos\"\n        self.no_aug_epochs = 15\n        self.min_lr_ratio = 0.05\n        self.ema = True\n\n        self.weight_decay = 5e-4\n        self.momentum = 0.9\n        self.print_interval = 10\n        self.eval_interval = 10\n        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\n\n        # -----------------  testing config ------------------ #\n        self.test_size = (640, 640)\n        # self.test_conf = 0.001\n        # self.nmsthre = 0.65\n        self.random_flip=False\n\n    def get_model(self):\n        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead\n\n        def init_yolo(M):\n            for m in M.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eps = 1e-3\n                    m.momentum = 0.03\n\n        if getattr(self, \"model\", None) is None:\n            in_channels = [256, 512, 1024]\n            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\n            head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)\n            self.model = YOLOX(backbone, head)\n\n        self.model.apply(init_yolo)\n        self.model.head.initialize_biases(1e-2)\n        return self.model\n\n    def get_data_loader(self, batch_size, is_distributed, no_aug=False):\n        from yolox.data import (\n            COCODataset,\n            DataLoader,\n            InfiniteSampler,\n            MosaicDetection,\n            TrainTransform,\n            YoloBatchSampler\n        )\n\n        dataset = COCODataset(\n            data_dir=None,\n            json_file=self.train_ann,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=50,\n            ),\n        )\n\n        dataset = MosaicDetection(\n            dataset,\n            mosaic=not no_aug,\n            img_size=self.input_size,\n            preproc=TrainTransform(\n                rgb_means=(0.485, 0.456, 0.406),\n                std=(0.229, 0.224, 0.225),\n                max_labels=120,\n            ),\n            degrees=self.degrees,\n            
translate=self.translate,\n            scale=self.scale,\n            shear=self.shear,\n            perspective=self.perspective,\n            enable_mixup=self.enable_mixup,\n        )\n\n        self.dataset = dataset\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n\n        sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)\n\n        batch_sampler = YoloBatchSampler(\n            sampler=sampler,\n            batch_size=batch_size,\n            drop_last=False,\n            input_dimension=self.input_size,\n            mosaic=not no_aug,\n        )\n\n        dataloader_kwargs = {\"num_workers\": self.data_num_workers, \"pin_memory\": True}\n        dataloader_kwargs[\"batch_sampler\"] = batch_sampler\n        train_loader = DataLoader(self.dataset, **dataloader_kwargs)\n\n        return train_loader\n\n    def random_resize(self, data_loader, epoch, rank, is_distributed):\n        tensor = torch.LongTensor(2).cuda()\n\n        if rank == 0:\n            size_factor = self.input_size[1] * 1.0 / self.input_size[0]\n            size = random.randint(*self.random_size)\n            size = (int(32 * size), 32 * int(size * size_factor))\n            tensor[0] = size[0]\n            tensor[1] = size[1]\n\n        if is_distributed:\n            dist.barrier()\n            dist.broadcast(tensor, 0)\n\n        input_size = data_loader.change_input_dim(\n            multiple=(tensor[0].item(), tensor[1].item()), random_range=None\n        )\n        return input_size\n\n    def get_optimizer(self, batch_size):\n        if \"optimizer\" not in self.__dict__:\n            if self.warmup_epochs > 0:\n                lr = self.warmup_lr\n            else:\n                lr = self.basic_lr_per_img * batch_size\n\n            pg0, pg1, pg2 = [], [], []  # optimizer parameter groups\n\n            for k, v in self.model.named_modules():\n                if hasattr(v, \"bias\") and isinstance(v.bias, nn.Parameter):\n                    pg2.append(v.bias)  # biases\n                if isinstance(v, nn.BatchNorm2d) or \"bn\" in k:\n                    pg0.append(v.weight)  # no decay\n                elif hasattr(v, \"weight\") and isinstance(v.weight, nn.Parameter):\n                    pg1.append(v.weight)  # apply decay\n\n            optimizer = torch.optim.SGD(\n                pg0, lr=lr, momentum=self.momentum, nesterov=True\n            )\n            optimizer.add_param_group(\n                {\"params\": pg1, \"weight_decay\": self.weight_decay}\n            )  # add pg1 with weight_decay\n            optimizer.add_param_group({\"params\": pg2})\n            self.optimizer = optimizer\n\n        return self.optimizer\n\n    def get_lr_scheduler(self, lr, iters_per_epoch):\n        from yolox.utils import LRScheduler\n\n        scheduler = LRScheduler(\n            self.scheduler,\n            lr,\n            iters_per_epoch,\n            self.max_epoch,\n            warmup_epochs=self.warmup_epochs,\n            warmup_lr_start=self.warmup_lr,\n            no_aug_epochs=self.no_aug_epochs,\n            min_lr_ratio=self.min_lr_ratio,\n        )\n        return scheduler\n\n    def get_eval_loader(self, batch_size, is_distributed, testdev=False):\n        from yolox.data import COCODataset, ValTransform\n\n        valdataset = COCODataset(\n            data_dir=None,\n            json_file=self.val_ann if not testdev else \"image_info_test-dev2017.json\",\n            name=\"val2017\" if not testdev else 
\"test2017\",\n            img_size=self.test_size,\n            preproc=ValTransform(\n                rgb_means=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)\n            ),\n        )\n\n        if is_distributed:\n            batch_size = batch_size // dist.get_world_size()\n            sampler = torch.utils.data.distributed.DistributedSampler(\n                valdataset, shuffle=False\n            )\n        else:\n            sampler = torch.utils.data.SequentialSampler(valdataset)\n\n        dataloader_kwargs = {\n            \"num_workers\": self.data_num_workers,\n            \"pin_memory\": True,\n            \"sampler\": sampler,\n        }\n        dataloader_kwargs[\"batch_size\"] = batch_size\n        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)\n\n        return val_loader\n\n    def get_evaluator(self, batch_size, is_distributed, testdev=False):\n        from yolox.evaluators import COCOEvaluator\n\n        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)\n        evaluator = COCOEvaluator(\n            dataloader=val_loader,\n            img_size=self.test_size,\n            confthre=self.test_conf,\n            nmsthre=self.nmsthre,\n            num_classes=self.num_classes,\n            testdev=testdev,\n        )\n        return evaluator\n\n    def eval(self, model, evaluator, is_distributed, half=False):\n        return evaluator.evaluate(model, is_distributed, half)\n"
  },
  {
    "path": "yolox/layers/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .fast_coco_eval_api import COCOeval_opt\n"
  },
  {
    "path": "yolox/layers/csrc/cocoeval/cocoeval.cpp",
    "content": "// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n#include \"cocoeval.h\"\n#include <time.h>\n#include <algorithm>\n#include <cstdint>\n#include <numeric>\n\nusing namespace pybind11::literals;\n\nnamespace COCOeval {\n\n// Sort detections from highest score to lowest, such that\n// detection_instances[detection_sorted_indices[t]] >=\n// detection_instances[detection_sorted_indices[t+1]].  Use stable_sort to match\n// original COCO API\nvoid SortInstancesByDetectionScore(\n    const std::vector<InstanceAnnotation>& detection_instances,\n    std::vector<uint64_t>* detection_sorted_indices) {\n  detection_sorted_indices->resize(detection_instances.size());\n  std::iota(\n      detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);\n  std::stable_sort(\n      detection_sorted_indices->begin(),\n      detection_sorted_indices->end(),\n      [&detection_instances](size_t j1, size_t j2) {\n        return detection_instances[j1].score > detection_instances[j2].score;\n      });\n}\n\n// Partition the ground truth objects based on whether or not to ignore them\n// based on area\nvoid SortInstancesByIgnore(\n    const std::array<double, 2>& area_range,\n    const std::vector<InstanceAnnotation>& ground_truth_instances,\n    std::vector<uint64_t>* ground_truth_sorted_indices,\n    std::vector<bool>* ignores) {\n  ignores->clear();\n  ignores->reserve(ground_truth_instances.size());\n  for (auto o : ground_truth_instances) {\n    ignores->push_back(\n        o.ignore || o.area < area_range[0] || o.area > area_range[1]);\n  }\n\n  ground_truth_sorted_indices->resize(ground_truth_instances.size());\n  std::iota(\n      ground_truth_sorted_indices->begin(),\n      ground_truth_sorted_indices->end(),\n      0);\n  std::stable_sort(\n      ground_truth_sorted_indices->begin(),\n      ground_truth_sorted_indices->end(),\n      [&ignores](size_t j1, size_t j2) {\n        return (int)(*ignores)[j1] < (int)(*ignores)[j2];\n      });\n}\n\n// For each IOU threshold, greedily match each detected instance to a ground\n// truth instance (if possible) and store the results\nvoid MatchDetectionsToGroundTruth(\n    const std::vector<InstanceAnnotation>& detection_instances,\n    const std::vector<uint64_t>& detection_sorted_indices,\n    const std::vector<InstanceAnnotation>& ground_truth_instances,\n    const std::vector<uint64_t>& ground_truth_sorted_indices,\n    const std::vector<bool>& ignores,\n    const std::vector<std::vector<double>>& ious,\n    const std::vector<double>& iou_thresholds,\n    const std::array<double, 2>& area_range,\n    ImageEvaluation* results) {\n  // Initialize memory to store return data matches and ignore\n  const int num_iou_thresholds = iou_thresholds.size();\n  const int num_ground_truth = ground_truth_sorted_indices.size();\n  const int num_detections = detection_sorted_indices.size();\n  std::vector<uint64_t> ground_truth_matches(\n      num_iou_thresholds * num_ground_truth, 0);\n  std::vector<uint64_t>& detection_matches = results->detection_matches;\n  std::vector<bool>& detection_ignores = results->detection_ignores;\n  std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;\n  detection_matches.resize(num_iou_thresholds * num_detections, 0);\n  detection_ignores.resize(num_iou_thresholds * num_detections, false);\n  ground_truth_ignores.resize(num_ground_truth);\n  for (auto g = 0; g < num_ground_truth; ++g) {\n    ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];\n  }\n\n  for (auto t 
= 0; t < num_iou_thresholds; ++t) {\n    for (auto d = 0; d < num_detections; ++d) {\n      // information about best match so far (match=-1 -> unmatched)\n      double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);\n      int match = -1;\n      for (auto g = 0; g < num_ground_truth; ++g) {\n        // if this ground truth instance is already matched and not a\n        // crowd, it cannot be matched to another detection\n        if (ground_truth_matches[t * num_ground_truth + g] > 0 &&\n            !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {\n          continue;\n        }\n\n        // if detected instance matched to a regular ground truth\n        // instance, we can break on the first ground truth instance\n        // tagged as ignore (because they are sorted by the ignore tag)\n        if (match >= 0 && !ground_truth_ignores[match] &&\n            ground_truth_ignores[g]) {\n          break;\n        }\n\n        // if IOU overlap is the best so far, store the match appropriately\n        if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {\n          best_iou = ious[d][ground_truth_sorted_indices[g]];\n          match = g;\n        }\n      }\n      // if match was made, store id of match for both detection and\n      // ground truth\n      if (match >= 0) {\n        detection_ignores[t * num_detections + d] = ground_truth_ignores[match];\n        detection_matches[t * num_detections + d] =\n            ground_truth_instances[ground_truth_sorted_indices[match]].id;\n        ground_truth_matches[t * num_ground_truth + match] =\n            detection_instances[detection_sorted_indices[d]].id;\n      }\n\n      // set unmatched detections outside of area range to ignore\n      const InstanceAnnotation& detection =\n          detection_instances[detection_sorted_indices[d]];\n      detection_ignores[t * num_detections + d] =\n          detection_ignores[t * num_detections + d] ||\n          (detection_matches[t * num_detections + d] == 0 &&\n           (detection.area < area_range[0] || detection.area > area_range[1]));\n    }\n  }\n\n  // store detection score results\n  results->detection_scores.resize(detection_sorted_indices.size());\n  for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {\n    results->detection_scores[d] =\n        detection_instances[detection_sorted_indices[d]].score;\n  }\n}\n\nstd::vector<ImageEvaluation> EvaluateImages(\n    const std::vector<std::array<double, 2>>& area_ranges,\n    int max_detections,\n    const std::vector<double>& iou_thresholds,\n    const ImageCategoryInstances<std::vector<double>>& image_category_ious,\n    const ImageCategoryInstances<InstanceAnnotation>&\n        image_category_ground_truth_instances,\n    const ImageCategoryInstances<InstanceAnnotation>&\n        image_category_detection_instances) {\n  const int num_area_ranges = area_ranges.size();\n  const int num_images = image_category_ground_truth_instances.size();\n  const int num_categories =\n      image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;\n  std::vector<uint64_t> detection_sorted_indices;\n  std::vector<uint64_t> ground_truth_sorted_indices;\n  std::vector<bool> ignores;\n  std::vector<ImageEvaluation> results_all(\n      num_images * num_area_ranges * num_categories);\n\n  // Store results for each image, category, and area range combination. 
Results\n  // for each IOU threshold are packed into the same ImageEvaluation object\n  for (auto i = 0; i < num_images; ++i) {\n    for (auto c = 0; c < num_categories; ++c) {\n      const std::vector<InstanceAnnotation>& ground_truth_instances =\n          image_category_ground_truth_instances[i][c];\n      const std::vector<InstanceAnnotation>& detection_instances =\n          image_category_detection_instances[i][c];\n\n      SortInstancesByDetectionScore(\n          detection_instances, &detection_sorted_indices);\n      if ((int)detection_sorted_indices.size() > max_detections) {\n        detection_sorted_indices.resize(max_detections);\n      }\n\n      for (size_t a = 0; a < area_ranges.size(); ++a) {\n        SortInstancesByIgnore(\n            area_ranges[a],\n            ground_truth_instances,\n            &ground_truth_sorted_indices,\n            &ignores);\n\n        MatchDetectionsToGroundTruth(\n            detection_instances,\n            detection_sorted_indices,\n            ground_truth_instances,\n            ground_truth_sorted_indices,\n            ignores,\n            image_category_ious[i][c],\n            iou_thresholds,\n            area_ranges[a],\n            &results_all\n                [c * num_area_ranges * num_images + a * num_images + i]);\n      }\n    }\n  }\n\n  return results_all;\n}\n\n// Convert a python list to a vector\ntemplate <typename T>\nstd::vector<T> list_to_vec(const py::list& l) {\n  std::vector<T> v(py::len(l));\n  for (int i = 0; i < (int)py::len(l); ++i) {\n    v[i] = l[i].cast<T>();\n  }\n  return v;\n}\n\n// Helper function to Accumulate()\n// Considers the evaluation results applicable to a particular category, area\n// range, and max_detections parameter setting, which begin at\n// evaluations[evaluation_index].  Extracts a sorted list of length n of all\n// applicable detection instances concatenated across all images in the dataset,\n// which are represented by the outputs evaluation_indices, detection_scores,\n// image_detection_indices, and detection_sorted_indices--all of which are\n// length n. evaluation_indices[i] stores the applicable index into\n// evaluations[] for instance i, which has detection score detection_score[i],\n// and is the image_detection_indices[i]'th of the list of detections\n// for the image containing i.  
detection_sorted_indices[] defines a sorted\n// permutation of the 3 other outputs\nint BuildSortedDetectionList(\n    const std::vector<ImageEvaluation>& evaluations,\n    const int64_t evaluation_index,\n    const int64_t num_images,\n    const int max_detections,\n    std::vector<uint64_t>* evaluation_indices,\n    std::vector<double>* detection_scores,\n    std::vector<uint64_t>* detection_sorted_indices,\n    std::vector<uint64_t>* image_detection_indices) {\n  assert(evaluations.size() >= evaluation_index + num_images);\n\n  // Extract a list of object instances of the applicable category, area\n  // range, and max detections requirements such that they can be sorted\n  image_detection_indices->clear();\n  evaluation_indices->clear();\n  detection_scores->clear();\n  image_detection_indices->reserve(num_images * max_detections);\n  evaluation_indices->reserve(num_images * max_detections);\n  detection_scores->reserve(num_images * max_detections);\n  int num_valid_ground_truth = 0;\n  for (auto i = 0; i < num_images; ++i) {\n    const ImageEvaluation& evaluation = evaluations[evaluation_index + i];\n\n    for (int d = 0;\n         d < (int)evaluation.detection_scores.size() && d < max_detections;\n         ++d) { // detected instances\n      evaluation_indices->push_back(evaluation_index + i);\n      image_detection_indices->push_back(d);\n      detection_scores->push_back(evaluation.detection_scores[d]);\n    }\n    for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {\n      if (!ground_truth_ignore) {\n        ++num_valid_ground_truth;\n      }\n    }\n  }\n\n  // Sort detections by decreasing score, using stable sort to match\n  // python implementation\n  detection_sorted_indices->resize(detection_scores->size());\n  std::iota(\n      detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);\n  std::stable_sort(\n      detection_sorted_indices->begin(),\n      detection_sorted_indices->end(),\n      [&detection_scores](size_t j1, size_t j2) {\n        return (*detection_scores)[j1] > (*detection_scores)[j2];\n      });\n\n  return num_valid_ground_truth;\n}\n\n// Helper function to Accumulate()\n// Compute a precision recall curve given a sorted list of detected instances\n// encoded in evaluations, evaluation_indices, detection_scores,\n// detection_sorted_indices, image_detection_indices (see\n// BuildSortedDetectionList()). 
Using vectors precisions and recalls\n// and temporary storage, output the results into precisions_out, recalls_out,\n// and scores_out, which are large buffers containing many precision/recall curves\n// for all possible parameter settings, with precisions_out_index and\n// recalls_out_index defining the applicable indices to store results.\nvoid ComputePrecisionRecallCurve(\n    const int64_t precisions_out_index,\n    const int64_t precisions_out_stride,\n    const int64_t recalls_out_index,\n    const std::vector<double>& recall_thresholds,\n    const int iou_threshold_index,\n    const int num_iou_thresholds,\n    const int num_valid_ground_truth,\n    const std::vector<ImageEvaluation>& evaluations,\n    const std::vector<uint64_t>& evaluation_indices,\n    const std::vector<double>& detection_scores,\n    const std::vector<uint64_t>& detection_sorted_indices,\n    const std::vector<uint64_t>& image_detection_indices,\n    std::vector<double>* precisions,\n    std::vector<double>* recalls,\n    std::vector<double>* precisions_out,\n    std::vector<double>* scores_out,\n    std::vector<double>* recalls_out) {\n  assert(recalls_out->size() > recalls_out_index);\n\n  // Compute precision/recall for each instance in the sorted list of detections\n  int64_t true_positives_sum = 0, false_positives_sum = 0;\n  precisions->clear();\n  recalls->clear();\n  precisions->reserve(detection_sorted_indices.size());\n  recalls->reserve(detection_sorted_indices.size());\n  assert(!evaluations.empty() || detection_sorted_indices.empty());\n  for (auto detection_sorted_index : detection_sorted_indices) {\n    const ImageEvaluation& evaluation =\n        evaluations[evaluation_indices[detection_sorted_index]];\n    const auto num_detections =\n        evaluation.detection_matches.size() / num_iou_thresholds;\n    const auto detection_index = iou_threshold_index * num_detections +\n        image_detection_indices[detection_sorted_index];\n    assert(evaluation.detection_matches.size() > detection_index);\n    assert(evaluation.detection_ignores.size() > detection_index);\n    const int64_t detection_match =\n        evaluation.detection_matches[detection_index];\n    const bool detection_ignores =\n        evaluation.detection_ignores[detection_index];\n    const auto true_positive = detection_match > 0 && !detection_ignores;\n    const auto false_positive = detection_match == 0 && !detection_ignores;\n    if (true_positive) {\n      ++true_positives_sum;\n    }\n    if (false_positive) {\n      ++false_positives_sum;\n    }\n\n    const double recall =\n        static_cast<double>(true_positives_sum) / num_valid_ground_truth;\n    recalls->push_back(recall);\n    const int64_t num_valid_detections =\n        true_positives_sum + false_positives_sum;\n    const double precision = num_valid_detections > 0\n        ? static_cast<double>(true_positives_sum) / num_valid_detections\n        : 0.0;\n    precisions->push_back(precision);\n  }\n\n  (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0;\n\n  // Interpolate the precision curve: each point takes the maximum precision\n  // to its right, as in the COCO Python API\n  for (int64_t i = static_cast<int64_t>(precisions->size()) - 1; i > 0; --i) {\n    if ((*precisions)[i] > (*precisions)[i - 1]) {\n      (*precisions)[i - 1] = (*precisions)[i];\n    }\n  }\n\n  // Sample the per instance precision/recall list at each recall threshold\n  for (size_t r = 0; r < recall_thresholds.size(); ++r) {\n    // first index in recalls >= recall_thresholds[r]\n    std::vector<double>::iterator low = std::lower_bound(\n        recalls->begin(), recalls->end(), recall_thresholds[r]);\n    size_t precisions_index = low - recalls->begin();\n\n    const auto results_ind = precisions_out_index + r * precisions_out_stride;\n    assert(results_ind < precisions_out->size());\n    assert(results_ind < scores_out->size());\n    if (precisions_index < precisions->size()) {\n      (*precisions_out)[results_ind] = (*precisions)[precisions_index];\n      (*scores_out)[results_ind] =\n          detection_scores[detection_sorted_indices[precisions_index]];\n    } else {\n      (*precisions_out)[results_ind] = 0;\n      (*scores_out)[results_ind] = 0;\n    }\n  }\n}\n\npy::dict Accumulate(\n    const py::object& params,\n    const std::vector<ImageEvaluation>& evaluations) {\n  const std::vector<double> recall_thresholds =\n      list_to_vec<double>(params.attr(\"recThrs\"));\n  const std::vector<int> max_detections =\n      list_to_vec<int>(params.attr(\"maxDets\"));\n  const int num_iou_thresholds = py::len(params.attr(\"iouThrs\"));\n  const int num_recall_thresholds = py::len(params.attr(\"recThrs\"));\n  const int num_categories = params.attr(\"useCats\").cast<int>() == 1\n      ? py::len(params.attr(\"catIds\"))\n      : 1;\n  const int num_area_ranges = py::len(params.attr(\"areaRng\"));\n  const int num_max_detections = py::len(params.attr(\"maxDets\"));\n  const int num_images = py::len(params.attr(\"imgIds\"));\n\n  std::vector<double> precisions_out(\n      num_iou_thresholds * num_recall_thresholds * num_categories *\n          num_area_ranges * num_max_detections,\n      -1);\n  std::vector<double> recalls_out(\n      num_iou_thresholds * num_categories * num_area_ranges *\n          num_max_detections,\n      -1);\n  std::vector<double> scores_out(\n      num_iou_thresholds * num_recall_thresholds * num_categories *\n          num_area_ranges * num_max_detections,\n      -1);\n\n  // Consider the list of all detected instances in the entire dataset in one\n  // large list.  
evaluation_indices, detection_scores,\n  // image_detection_indices, and detection_sorted_indices all have the same\n  // length as this list, such that each entry corresponds to one detected\n  // instance\n  std::vector<uint64_t> evaluation_indices; // indices into evaluations[]\n  std::vector<double> detection_scores; // detection scores of each instance\n  std::vector<uint64_t> detection_sorted_indices; // sorted indices of all\n                                                  // instances in the dataset\n  std::vector<uint64_t>\n      image_detection_indices; // indices into the list of detected instances in\n                               // the same image as each instance\n  std::vector<double> precisions, recalls;\n\n  for (auto c = 0; c < num_categories; ++c) {\n    for (auto a = 0; a < num_area_ranges; ++a) {\n      for (auto m = 0; m < num_max_detections; ++m) {\n        // The COCO PythonAPI assumes evaluations[] (the return value of\n        // COCOeval::EvaluateImages() is one long list storing results for each\n        // combination of category, area range, and image id, with categories in\n        // the outermost loop and images in the innermost loop.\n        const int64_t evaluations_index =\n            c * num_area_ranges * num_images + a * num_images;\n        int num_valid_ground_truth = BuildSortedDetectionList(\n            evaluations,\n            evaluations_index,\n            num_images,\n            max_detections[m],\n            &evaluation_indices,\n            &detection_scores,\n            &detection_sorted_indices,\n            &image_detection_indices);\n\n        if (num_valid_ground_truth == 0) {\n          continue;\n        }\n\n        for (auto t = 0; t < num_iou_thresholds; ++t) {\n          // recalls_out is a flattened vectors representing a\n          // num_iou_thresholds X num_categories X num_area_ranges X\n          // num_max_detections matrix\n          const int64_t recalls_out_index =\n              t * num_categories * num_area_ranges * num_max_detections +\n              c * num_area_ranges * num_max_detections +\n              a * num_max_detections + m;\n\n          // precisions_out and scores_out are flattened vectors\n          // representing a num_iou_thresholds X num_recall_thresholds X\n          // num_categories X num_area_ranges X num_max_detections matrix\n          const int64_t precisions_out_stride =\n              num_categories * num_area_ranges * num_max_detections;\n          const int64_t precisions_out_index = t * num_recall_thresholds *\n                  num_categories * num_area_ranges * num_max_detections +\n              c * num_area_ranges * num_max_detections +\n              a * num_max_detections + m;\n\n          ComputePrecisionRecallCurve(\n              precisions_out_index,\n              precisions_out_stride,\n              recalls_out_index,\n              recall_thresholds,\n              t,\n              num_iou_thresholds,\n              num_valid_ground_truth,\n              evaluations,\n              evaluation_indices,\n              detection_scores,\n              detection_sorted_indices,\n              image_detection_indices,\n              &precisions,\n              &recalls,\n              &precisions_out,\n              &scores_out,\n              &recalls_out);\n        }\n      }\n    }\n  }\n\n  time_t rawtime;\n  struct tm local_time;\n  std::array<char, 200> buffer;\n  time(&rawtime);\n#ifdef _WIN32\n  localtime_s(&local_time, &rawtime);\n#else\n  localtime_r(&rawtime, 
&local_time);\n#endif\n  strftime(\n      buffer.data(), 200, \"%Y-%m-%d %H:%M:%S\", &local_time);\n  return py::dict(\n      \"params\"_a = params,\n      \"counts\"_a = std::vector<int64_t>({num_iou_thresholds,\n                                         num_recall_thresholds,\n                                         num_categories,\n                                         num_area_ranges,\n                                         num_max_detections}),\n      \"date\"_a = buffer,\n      \"precision\"_a = precisions_out,\n      \"recall\"_a = recalls_out,\n      \"scores\"_a = scores_out);\n}\n\n} // namespace COCOeval\n"
  },
  {
    "path": "yolox/layers/csrc/cocoeval/cocoeval.h",
    "content": "// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n#pragma once\n\n#include <pybind11/numpy.h>\n#include <pybind11/pybind11.h>\n#include <pybind11/stl.h>\n#include <pybind11/stl_bind.h>\n#include <vector>\n\nnamespace py = pybind11;\n\nnamespace COCOeval {\n\n// Annotation data for a single object instance in an image\nstruct InstanceAnnotation {\n  InstanceAnnotation(\n      uint64_t id,\n      double score,\n      double area,\n      bool is_crowd,\n      bool ignore)\n      : id{id}, score{score}, area{area}, is_crowd{is_crowd}, ignore{ignore} {}\n  uint64_t id;\n  double score = 0.;\n  double area = 0.;\n  bool is_crowd = false;\n  bool ignore = false;\n};\n\n// Stores intermediate results for evaluating detection results for a single\n// image that has D detected instances and G ground truth instances. This stores\n// matches between detected and ground truth instances\nstruct ImageEvaluation {\n  // For each of the D detected instances, the id of the matched ground truth\n  // instance, or 0 if unmatched\n  std::vector<uint64_t> detection_matches;\n\n  // The detection score of each of the D detected instances\n  std::vector<double> detection_scores;\n\n  // Marks whether or not each of G instances was ignored from evaluation (e.g.,\n  // because it's outside area_range)\n  std::vector<bool> ground_truth_ignores;\n\n  // Marks whether or not each of D instances was ignored from evaluation (e.g.,\n  // because it's outside aRng)\n  std::vector<bool> detection_ignores;\n};\n\ntemplate <class T>\nusing ImageCategoryInstances = std::vector<std::vector<std::vector<T>>>;\n\n// C++ implementation of COCO API cocoeval.py::COCOeval.evaluateImg().  For each\n// combination of image, category, area range settings, and IOU thresholds to\n// evaluate, it matches detected instances to ground truth instances and stores\n// the results into a vector of ImageEvaluation results, which will be\n// interpreted by the COCOeval::Accumulate() function to produce precion-recall\n// curves.  The parameters of nested vectors have the following semantics:\n//   image_category_ious[i][c][d][g] is the intersection over union of the d'th\n//     detected instance and g'th ground truth instance of\n//     category category_ids[c] in image image_ids[i]\n//   image_category_ground_truth_instances[i][c] is a vector of ground truth\n//     instances in image image_ids[i] of category category_ids[c]\n//   image_category_detection_instances[i][c] is a vector of detected\n//     instances in image image_ids[i] of category category_ids[c]\nstd::vector<ImageEvaluation> EvaluateImages(\n    const std::vector<std::array<double, 2>>& area_ranges, // vector of 2-tuples\n    int max_detections,\n    const std::vector<double>& iou_thresholds,\n    const ImageCategoryInstances<std::vector<double>>& image_category_ious,\n    const ImageCategoryInstances<InstanceAnnotation>&\n        image_category_ground_truth_instances,\n    const ImageCategoryInstances<InstanceAnnotation>&\n        image_category_detection_instances);\n\n// C++ implementation of COCOeval.accumulate(), which generates precision\n// recall curves for each set of category, IOU threshold, detection area range,\n// and max number of detections parameters.  
It is assumed that the parameter\n// evaluations is the return value of the functon COCOeval::EvaluateImages(),\n// which was called with the same parameter settings params\npy::dict Accumulate(\n    const py::object& params,\n    const std::vector<ImageEvaluation>& evalutations);\n\n} // namespace COCOeval\n"
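\n// Typical call sequence (a sketch; the arguments mirror the declarations\n// above and come from the Python wrapper via pybind11):\n//   auto evals = COCOeval::EvaluateImages(\n//       area_ranges, max_detections, iou_thresholds, ious,\n//       gt_instances, dt_instances);\n//   py::dict result = COCOeval::Accumulate(params, evals);\n"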
  },
  {
    "path": "yolox/layers/csrc/vision.cpp",
    "content": "#include \"cocoeval/cocoeval.h\"\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"COCOevalAccumulate\", &COCOeval::Accumulate, \"COCOeval::Accumulate\");\n    m.def(\n        \"COCOevalEvaluateImages\",\n        &COCOeval::EvaluateImages,\n        \"COCOeval::EvaluateImages\");\n    pybind11::class_<COCOeval::InstanceAnnotation>(m, \"InstanceAnnotation\")\n        .def(pybind11::init<uint64_t, double, double, bool, bool>());\n    pybind11::class_<COCOeval::ImageEvaluation>(m, \"ImageEvaluation\")\n        .def(pybind11::init<>());\n}\n"
  },
  {
    "path": "yolox/layers/fast_coco_eval_api.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# This file comes from\n# https://github.com/facebookresearch/detectron2/blob/master/detectron2/evaluation/fast_eval_api.py\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport numpy as np\nfrom pycocotools.cocoeval import COCOeval\n\n# import torch first to make yolox._C work without ImportError of libc10.so\n# in YOLOX, env is already set in __init__.py.\nfrom yolox import _C\n\nimport copy\nimport time\n\n\nclass COCOeval_opt(COCOeval):\n    \"\"\"\n    This is a slightly modified version of the original COCO API, where the functions evaluateImg()\n    and accumulate() are implemented in C++ to speedup evaluation\n    \"\"\"\n\n    def evaluate(self):\n        \"\"\"\n        Run per image evaluation on given images and store results in self.evalImgs_cpp, a\n        datastructure that isn't readable from Python but is used by a c++ implementation of\n        accumulate().  Unlike the original COCO PythonAPI, we don't populate the datastructure\n        self.evalImgs because this datastructure is a computational bottleneck.\n        :return: None\n        \"\"\"\n        tic = time.time()\n\n        print(\"Running per image evaluation...\")\n        p = self.params\n        # add backward compatibility if useSegm is specified in params\n        if p.useSegm is not None:\n            p.iouType = \"segm\" if p.useSegm == 1 else \"bbox\"\n            print(\n                \"useSegm (deprecated) is not None. Running {} evaluation\".format(\n                    p.iouType\n                )\n            )\n        print(\"Evaluate annotation type *{}*\".format(p.iouType))\n        p.imgIds = list(np.unique(p.imgIds))\n        if p.useCats:\n            p.catIds = list(np.unique(p.catIds))\n        p.maxDets = sorted(p.maxDets)\n        self.params = p\n\n        self._prepare()\n\n        # loop through images, area range, max detection number\n        catIds = p.catIds if p.useCats else [-1]\n\n        if p.iouType == \"segm\" or p.iouType == \"bbox\":\n            computeIoU = self.computeIoU\n        elif p.iouType == \"keypoints\":\n            computeIoU = self.computeOks\n        self.ious = {\n            (imgId, catId): computeIoU(imgId, catId)\n            for imgId in p.imgIds\n            for catId in catIds\n        }\n\n        maxDet = p.maxDets[-1]\n\n        # <<<< Beginning of code differences with original COCO API\n        def convert_instances_to_cpp(instances, is_det=False):\n            # Convert annotations for a list of instances in an image to a format that's fast\n            # to access in C++\n            instances_cpp = []\n            for instance in instances:\n                instance_cpp = _C.InstanceAnnotation(\n                    int(instance[\"id\"]),\n                    instance[\"score\"] if is_det else instance.get(\"score\", 0.0),\n                    instance[\"area\"],\n                    bool(instance.get(\"iscrowd\", 0)),\n                    bool(instance.get(\"ignore\", 0)),\n                )\n                instances_cpp.append(instance_cpp)\n            return instances_cpp\n\n        # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++\n        ground_truth_instances = [\n            [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]\n            for imgId in p.imgIds\n        ]\n        detected_instances = [\n            [\n     
           convert_instances_to_cpp(self._dts[imgId, catId], is_det=True)\n                for catId in p.catIds\n            ]\n            for imgId in p.imgIds\n        ]\n        ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]\n\n        if not p.useCats:\n            # For each image, flatten per-category lists into a single list\n            ground_truth_instances = [\n                [[o for c in i for o in c]] for i in ground_truth_instances\n            ]\n            detected_instances = [\n                [[o for c in i for o in c]] for i in detected_instances\n            ]\n\n        # Call C++ implementation of self.evaluateImgs()\n        self._evalImgs_cpp = _C.COCOevalEvaluateImages(\n            p.areaRng,\n            maxDet,\n            p.iouThrs,\n            ious,\n            ground_truth_instances,\n            detected_instances,\n        )\n        self._evalImgs = None\n\n        self._paramsEval = copy.deepcopy(self.params)\n        toc = time.time()\n        print(\"COCOeval_opt.evaluate() finished in {:0.2f} seconds.\".format(toc - tic))\n        # >>>> End of code differences with original COCO API\n\n    def accumulate(self):\n        \"\"\"\n        Accumulate per image evaluation results and store the result in self.eval.  Does not\n        support changing parameter settings from those used by self.evaluate()\n        \"\"\"\n        print(\"Accumulating evaluation results...\")\n        tic = time.time()\n        if not hasattr(self, \"_evalImgs_cpp\"):\n            raise RuntimeError(\"Please run evaluate() first\")\n\n        self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)\n\n        # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections\n        self.eval[\"recall\"] = np.array(self.eval[\"recall\"]).reshape(\n            self.eval[\"counts\"][:1] + self.eval[\"counts\"][2:]\n        )\n\n        # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X\n        # num_area_ranges X num_max_detections\n        self.eval[\"precision\"] = np.array(self.eval[\"precision\"]).reshape(\n            self.eval[\"counts\"]\n        )\n        self.eval[\"scores\"] = np.array(self.eval[\"scores\"]).reshape(self.eval[\"counts\"])\n        toc = time.time()\n        print(\n            \"COCOeval_opt.accumulate() finished in {:0.2f} seconds.\".format(toc - tic)\n        )\n
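\n\n# Usage sketch (drop-in for pycocotools' COCOeval; assumes coco_gt is a\n# pycocotools COCO object and \"results.json\" holds COCO-format detections):\n#   coco_dt = coco_gt.loadRes(\"results.json\")\n#   coco_eval = COCOeval_opt(coco_gt, coco_dt, \"bbox\")\n#   coco_eval.evaluate()\n#   coco_eval.accumulate()\n#   coco_eval.summarize()\n"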
  },
  {
    "path": "yolox/models/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .darknet import CSPDarknet, Darknet\nfrom .losses import IOUloss\nfrom .yolo_fpn import YOLOFPN\nfrom .yolo_head import YOLOXHead\nfrom .yolo_pafpn import YOLOPAFPN\nfrom .yolox import YOLOX\n"
  },
  {
    "path": "yolox/models/darknet.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom torch import nn\n\nfrom .network_blocks import BaseConv, CSPLayer, DWConv, Focus, ResLayer, SPPBottleneck\n\n\nclass Darknet(nn.Module):\n    # number of blocks from dark2 to dark5.\n    depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}\n\n    def __init__(\n        self,\n        depth,\n        in_channels=3,\n        stem_out_channels=32,\n        out_features=(\"dark3\", \"dark4\", \"dark5\"),\n    ):\n        \"\"\"\n        Args:\n            depth (int): depth of darknet used in model, usually use [21, 53] for this param.\n            in_channels (int): number of input channels, for example, use 3 for RGB image.\n            stem_out_channels (int): number of output chanels of darknet stem.\n                It decides channels of darknet layer2 to layer5.\n            out_features (Tuple[str]): desired output layer name.\n        \"\"\"\n        super().__init__()\n        assert out_features, \"please provide output features of Darknet\"\n        self.out_features = out_features\n        self.stem = nn.Sequential(\n            BaseConv(in_channels, stem_out_channels, ksize=3, stride=1, act=\"lrelu\"),\n            *self.make_group_layer(stem_out_channels, num_blocks=1, stride=2),\n        )\n        in_channels = stem_out_channels * 2  # 64\n\n        num_blocks = Darknet.depth2blocks[depth]\n        # create darknet with `stem_out_channels` and `num_blocks` layers.\n        # to make model structure more clear, we don't use `for` statement in python.\n        self.dark2 = nn.Sequential(\n            *self.make_group_layer(in_channels, num_blocks[0], stride=2)\n        )\n        in_channels *= 2  # 128\n        self.dark3 = nn.Sequential(\n            *self.make_group_layer(in_channels, num_blocks[1], stride=2)\n        )\n        in_channels *= 2  # 256\n        self.dark4 = nn.Sequential(\n            *self.make_group_layer(in_channels, num_blocks[2], stride=2)\n        )\n        in_channels *= 2  # 512\n\n        self.dark5 = nn.Sequential(\n            *self.make_group_layer(in_channels, num_blocks[3], stride=2),\n            *self.make_spp_block([in_channels, in_channels * 2], in_channels * 2),\n        )\n        # self.diffusion_freeze()\n\n    def diffusion_freeze(self):\n        for v in self.stem.parameters():\n            v.requires_grad=False\n        for v in self.dark2.parameters():\n            v.requires_grad=False\n\n    def make_group_layer(self, in_channels: int, num_blocks: int, stride: int = 1):\n        \"starts with conv layer then has `num_blocks` `ResLayer`\"\n        return [\n            BaseConv(in_channels, in_channels * 2, ksize=3, stride=stride, act=\"lrelu\"),\n            *[(ResLayer(in_channels * 2)) for _ in range(num_blocks)],\n        ]\n\n    def make_spp_block(self, filters_list, in_filters):\n        m = nn.Sequential(\n            *[\n                BaseConv(in_filters, filters_list[0], 1, stride=1, act=\"lrelu\"),\n                BaseConv(filters_list[0], filters_list[1], 3, stride=1, act=\"lrelu\"),\n                SPPBottleneck(\n                    in_channels=filters_list[1],\n                    out_channels=filters_list[0],\n                    activation=\"lrelu\",\n                ),\n                BaseConv(filters_list[0], filters_list[1], 3, stride=1, act=\"lrelu\"),\n                BaseConv(filters_list[1], filters_list[0], 1, stride=1, act=\"lrelu\"),\n            ]\n        )\n        
return m\n\n    def forward(self, x):\n        outputs = {}\n        x = self.stem(x)\n        outputs[\"stem\"] = x\n        x = self.dark2(x)\n        outputs[\"dark2\"] = x\n        x = self.dark3(x)\n        outputs[\"dark3\"] = x\n        x = self.dark4(x)\n        outputs[\"dark4\"] = x\n        x = self.dark5(x)\n        outputs[\"dark5\"] = x\n        return {k: v for k, v in outputs.items() if k in self.out_features}\n\n\nclass CSPDarknet(nn.Module):\n    def __init__(\n        self,\n        dep_mul,\n        wid_mul,\n        out_features=(\"dark3\", \"dark4\", \"dark5\"),\n        depthwise=False,\n        act=\"silu\",\n    ):\n        super().__init__()\n        assert out_features, \"please provide output features of Darknet\"\n        self.out_features = out_features\n        Conv = DWConv if depthwise else BaseConv\n\n        base_channels = int(wid_mul * 64)  # 64\n        base_depth = max(round(dep_mul * 3), 1)  # 3\n\n        # stem\n        self.stem = Focus(3, base_channels, ksize=3, act=act)\n\n        # dark2\n        self.dark2 = nn.Sequential(\n            Conv(base_channels, base_channels * 2, 3, 2, act=act),\n            CSPLayer(\n                base_channels * 2,\n                base_channels * 2,\n                n=base_depth,\n                depthwise=depthwise,\n                act=act,\n            ),\n        )\n\n        # dark3\n        self.dark3 = nn.Sequential(\n            Conv(base_channels * 2, base_channels * 4, 3, 2, act=act),\n            CSPLayer(\n                base_channels * 4,\n                base_channels * 4,\n                n=base_depth * 3,\n                depthwise=depthwise,\n                act=act,\n            ),\n        )\n\n        # dark4\n        self.dark4 = nn.Sequential(\n            Conv(base_channels * 4, base_channels * 8, 3, 2, act=act),\n            CSPLayer(\n                base_channels * 8,\n                base_channels * 8,\n                n=base_depth * 3,\n                depthwise=depthwise,\n                act=act,\n            ),\n        )\n\n        # dark5\n        self.dark5 = nn.Sequential(\n            Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),\n            SPPBottleneck(base_channels * 16, base_channels * 16, activation=act),\n            CSPLayer(\n                base_channels * 16,\n                base_channels * 16,\n                n=base_depth,\n                shortcut=False,\n                depthwise=depthwise,\n                act=act,\n            ),\n        )\n\n    def forward(self, x):\n        outputs = {}\n        x = self.stem(x)\n        outputs[\"stem\"] = x\n        x = self.dark2(x)\n        outputs[\"dark2\"] = x\n        x = self.dark3(x)\n        outputs[\"dark3\"] = x\n        x = self.dark4(x)\n        outputs[\"dark4\"] = x\n        x = self.dark5(x)\n        outputs[\"dark5\"] = x\n        return {k: v for k, v in outputs.items() if k in self.out_features}\n"
  },
  {
    "path": "yolox/models/losses.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass IOUloss(nn.Module):\n    def __init__(self, reduction=\"none\", loss_type=\"iou\"):\n        super(IOUloss, self).__init__()\n        self.reduction = reduction\n        self.loss_type = loss_type\n\n    def forward(self, pred, target):\n        assert pred.shape[0] == target.shape[0]\n\n        pred = pred.view(-1, 4)\n        target = target.view(-1, 4)\n        tl = torch.max(\n            (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)\n        )\n        br = torch.min(\n            (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)\n        )\n\n        area_p = torch.prod(pred[:, 2:], 1)\n        area_g = torch.prod(target[:, 2:], 1)\n\n        en = (tl < br).type(tl.type()).prod(dim=1)\n        area_i = torch.prod(br - tl, 1) * en\n        iou = (area_i) / (area_p + area_g - area_i + 1e-16)\n\n        if self.loss_type == \"iou\":\n            loss = 1 - iou ** 2\n        elif self.loss_type == \"giou\":\n            c_tl = torch.min(\n                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)\n            )\n            c_br = torch.max(\n                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)\n            )\n            area_c = torch.prod(c_br - c_tl, 1)\n            giou = iou - (area_c - area_i) / area_c.clamp(1e-16)\n            loss = 1 - giou.clamp(min=-1.0, max=1.0)\n\n        if self.reduction == \"mean\":\n            loss = loss.mean()\n        elif self.reduction == \"sum\":\n            loss = loss.sum()\n\n        return loss\n\n\ndef sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n    \"\"\"\n    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n        alpha: (optional) Weighting factor in range (0,1) to balance\n                positive vs negative examples. Default = -1 (no weighting).\n        gamma: Exponent of the modulating factor (1 - p_t) to\n               balance easy vs hard examples.\n    Returns:\n        Loss tensor\n    \"\"\"\n    prob = inputs.sigmoid()\n    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n    p_t = prob * targets + (1 - prob) * (1 - targets)\n    loss = ce_loss * ((1 - p_t) ** gamma)\n\n    if alpha >= 0:\n        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n        loss = alpha_t * loss\n    #return loss.mean(0).sum() / num_boxes\n    return loss.sum() / num_boxes"
  },
  {
    "path": "yolox/models/network_blocks.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\n\n\nclass SiLU(nn.Module):\n    \"\"\"export-friendly version of nn.SiLU()\"\"\"\n\n    @staticmethod\n    def forward(x):\n        return x * torch.sigmoid(x)\n\n\ndef get_activation(name=\"silu\", inplace=True):\n    if name == \"silu\":\n        module = nn.SiLU(inplace=inplace)\n    elif name == \"relu\":\n        module = nn.ReLU(inplace=inplace)\n    elif name == \"lrelu\":\n        module = nn.LeakyReLU(0.1, inplace=inplace)\n    else:\n        raise AttributeError(\"Unsupported act type: {}\".format(name))\n    return module\n\n\nclass BaseConv(nn.Module):\n    \"\"\"A Conv2d -> Batchnorm -> silu/leaky relu block\"\"\"\n\n    def __init__(\n        self, in_channels, out_channels, ksize, stride, groups=1, bias=False, act=\"silu\"\n    ):\n        super().__init__()\n        # same padding\n        pad = (ksize - 1) // 2\n        self.conv = nn.Conv2d(\n            in_channels,\n            out_channels,\n            kernel_size=ksize,\n            stride=stride,\n            padding=pad,\n            groups=groups,\n            bias=bias,\n        )\n        self.bn = nn.BatchNorm2d(out_channels)\n        self.act = get_activation(act, inplace=True)\n\n    def forward(self, x):\n        return self.act(self.bn(self.conv(x)))\n\n    def fuseforward(self, x):\n        return self.act(self.conv(x))\n\n\nclass DWConv(nn.Module):\n    \"\"\"Depthwise Conv + Conv\"\"\"\n\n    def __init__(self, in_channels, out_channels, ksize, stride=1, act=\"silu\"):\n        super().__init__()\n        self.dconv = BaseConv(\n            in_channels,\n            in_channels,\n            ksize=ksize,\n            stride=stride,\n            groups=in_channels,\n            act=act,\n        )\n        self.pconv = BaseConv(\n            in_channels, out_channels, ksize=1, stride=1, groups=1, act=act\n        )\n\n    def forward(self, x):\n        x = self.dconv(x)\n        return self.pconv(x)\n\n\nclass Bottleneck(nn.Module):\n    # Standard bottleneck\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        shortcut=True,\n        expansion=0.5,\n        depthwise=False,\n        act=\"silu\",\n    ):\n        super().__init__()\n        hidden_channels = int(out_channels * expansion)\n        Conv = DWConv if depthwise else BaseConv\n        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n        self.conv2 = Conv(hidden_channels, out_channels, 3, stride=1, act=act)\n        self.use_add = shortcut and in_channels == out_channels\n\n    def forward(self, x):\n        y = self.conv2(self.conv1(x))\n        if self.use_add:\n            y = y + x\n        return y\n\n\nclass ResLayer(nn.Module):\n    \"Residual layer with `in_channels` inputs.\"\n\n    def __init__(self, in_channels: int):\n        super().__init__()\n        mid_channels = in_channels // 2\n        self.layer1 = BaseConv(\n            in_channels, mid_channels, ksize=1, stride=1, act=\"lrelu\"\n        )\n        self.layer2 = BaseConv(\n            mid_channels, in_channels, ksize=3, stride=1, act=\"lrelu\"\n        )\n\n    def forward(self, x):\n        out = self.layer2(self.layer1(x))\n        return x + out\n\n\nclass SPPBottleneck(nn.Module):\n    \"\"\"Spatial pyramid pooling layer used in YOLOv3-SPP\"\"\"\n\n    def __init__(\n        self, in_channels, out_channels, kernel_sizes=(5, 9, 13), 
activation=\"silu\"\n    ):\n        super().__init__()\n        hidden_channels = in_channels // 2\n        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=activation)\n        self.m = nn.ModuleList(\n            [\n                nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)\n                for ks in kernel_sizes\n            ]\n        )\n        conv2_channels = hidden_channels * (len(kernel_sizes) + 1)\n        self.conv2 = BaseConv(conv2_channels, out_channels, 1, stride=1, act=activation)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = torch.cat([x] + [m(x) for m in self.m], dim=1)\n        x = self.conv2(x)\n        return x\n\n\nclass CSPLayer(nn.Module):\n    \"\"\"C3 in yolov5, CSP Bottleneck with 3 convolutions\"\"\"\n\n    def __init__(\n        self,\n        in_channels,\n        out_channels,\n        n=1,\n        shortcut=True,\n        expansion=0.5,\n        depthwise=False,\n        act=\"silu\",\n    ):\n        \"\"\"\n        Args:\n            in_channels (int): input channels.\n            out_channels (int): output channels.\n            n (int): number of Bottlenecks. Default value: 1.\n        \"\"\"\n        # ch_in, ch_out, number, shortcut, groups, expansion\n        super().__init__()\n        hidden_channels = int(out_channels * expansion)  # hidden channels\n        self.conv1 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n        self.conv2 = BaseConv(in_channels, hidden_channels, 1, stride=1, act=act)\n        self.conv3 = BaseConv(2 * hidden_channels, out_channels, 1, stride=1, act=act)\n        module_list = [\n            Bottleneck(\n                hidden_channels, hidden_channels, shortcut, 1.0, depthwise, act=act\n            )\n            for _ in range(n)\n        ]\n        self.m = nn.Sequential(*module_list)\n\n    def forward(self, x):\n        x_1 = self.conv1(x)\n        x_2 = self.conv2(x)\n        x_1 = self.m(x_1)\n        x = torch.cat((x_1, x_2), dim=1)\n        return self.conv3(x)\n\n\nclass Focus(nn.Module):\n    \"\"\"Focus width and height information into channel space.\"\"\"\n\n    def __init__(self, in_channels, out_channels, ksize=1, stride=1, act=\"silu\"):\n        super().__init__()\n        self.conv = BaseConv(in_channels * 4, out_channels, ksize, stride, act=act)\n\n    def forward(self, x):\n        # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)\n        patch_top_left = x[..., ::2, ::2]\n        patch_top_right = x[..., ::2, 1::2]\n        patch_bot_left = x[..., 1::2, ::2]\n        patch_bot_right = x[..., 1::2, 1::2]\n        x = torch.cat(\n            (\n                patch_top_left,\n                patch_bot_left,\n                patch_top_right,\n                patch_bot_right,\n            ),\n            dim=1,\n        )\n        return self.conv(x)\n"
  },
  {
    "path": "yolox/models/yolo_fpn.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\n\nfrom .darknet import Darknet\nfrom .network_blocks import BaseConv\n\n\nclass YOLOFPN(nn.Module):\n    \"\"\"\n    YOLOFPN module. Darknet 53 is the default backbone of this model.\n    \"\"\"\n\n    def __init__(\n        self,\n        depth=53,\n        in_features=[\"dark3\", \"dark4\", \"dark5\"],\n    ):\n        super().__init__()\n\n        self.backbone = Darknet(depth)\n        self.in_features = in_features\n\n        # out 1\n        self.out1_cbl = self._make_cbl(512, 256, 1)\n        self.out1 = self._make_embedding([256, 512], 512 + 256)\n\n        # out 2\n        self.out2_cbl = self._make_cbl(256, 128, 1)\n        self.out2 = self._make_embedding([128, 256], 256 + 128)\n\n        # upsample\n        self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n\n    def _make_cbl(self, _in, _out, ks):\n        return BaseConv(_in, _out, ks, stride=1, act=\"lrelu\")\n\n    def _make_embedding(self, filters_list, in_filters):\n        m = nn.Sequential(\n            *[\n                self._make_cbl(in_filters, filters_list[0], 1),\n                self._make_cbl(filters_list[0], filters_list[1], 3),\n                self._make_cbl(filters_list[1], filters_list[0], 1),\n                self._make_cbl(filters_list[0], filters_list[1], 3),\n                self._make_cbl(filters_list[1], filters_list[0], 1),\n            ]\n        )\n        return m\n\n    def load_pretrained_model(self, filename=\"./weights/darknet53.mix.pth\"):\n        with open(filename, \"rb\") as f:\n            state_dict = torch.load(f, map_location=\"cpu\")\n        print(\"loading pretrained weights...\")\n        self.backbone.load_state_dict(state_dict)\n\n    def forward(self, inputs):\n        \"\"\"\n        Args:\n            inputs (Tensor): input image.\n\n        Returns:\n            Tuple[Tensor]: FPN output features..\n        \"\"\"\n        #  backbone\n        out_features = self.backbone(inputs)\n        x2, x1, x0 = [out_features[f] for f in self.in_features]\n\n        #  yolo branch 1\n        x1_in = self.out1_cbl(x0)\n        x1_in = self.upsample(x1_in)\n        x1_in = torch.cat([x1_in, x1], 1)\n        out_dark4 = self.out1(x1_in)\n\n        #  yolo branch 2\n        x2_in = self.out2_cbl(out_dark4)\n        x2_in = self.upsample(x2_in)\n        x2_in = torch.cat([x2_in, x2], 1)\n        out_dark3 = self.out2(x2_in)\n\n        outputs = (out_dark3, out_dark4, x0)\n        return outputs\n"
  },
  {
    "path": "yolox/models/yolo_head.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom loguru import logger\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom yolox.utils import bboxes_iou\n\nimport math\n\nfrom .losses import IOUloss\nfrom .network_blocks import BaseConv, DWConv\n\n\nclass YOLOXHead(nn.Module):\n    def __init__(\n        self,\n        num_classes,\n        width=1.0,\n        strides=[8, 16, 32],\n        in_channels=[256, 512, 1024],\n        act=\"silu\",\n        depthwise=False,\n    ):\n        \"\"\"\n        Args:\n            act (str): activation type of conv. Defalut value: \"silu\".\n            depthwise (bool): wheather apply depthwise conv in conv branch. Defalut value: False.\n        \"\"\"\n        super().__init__()\n\n        self.n_anchors = 1\n        self.num_classes = num_classes\n        self.decode_in_inference = True  # for deploy, set to False\n\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        self.cls_preds = nn.ModuleList()\n        self.reg_preds = nn.ModuleList()\n        self.obj_preds = nn.ModuleList()\n        self.stems = nn.ModuleList()\n        Conv = DWConv if depthwise else BaseConv\n\n        for i in range(len(in_channels)):\n            self.stems.append(\n                BaseConv(\n                    in_channels=int(in_channels[i] * width),\n                    out_channels=int(256 * width),\n                    ksize=1,\n                    stride=1,\n                    act=act,\n                )\n            )\n            self.cls_convs.append(\n                nn.Sequential(\n                    *[\n                        Conv(\n                            in_channels=int(256 * width),\n                            out_channels=int(256 * width),\n                            ksize=3,\n                            stride=1,\n                            act=act,\n                        ),\n                        Conv(\n                            in_channels=int(256 * width),\n                            out_channels=int(256 * width),\n                            ksize=3,\n                            stride=1,\n                            act=act,\n                        ),\n                    ]\n                )\n            )\n            self.reg_convs.append(\n                nn.Sequential(\n                    *[\n                        Conv(\n                            in_channels=int(256 * width),\n                            out_channels=int(256 * width),\n                            ksize=3,\n                            stride=1,\n                            act=act,\n                        ),\n                        Conv(\n                            in_channels=int(256 * width),\n                            out_channels=int(256 * width),\n                            ksize=3,\n                            stride=1,\n                            act=act,\n                        ),\n                    ]\n                )\n            )\n            self.cls_preds.append(\n                nn.Conv2d(\n                    in_channels=int(256 * width),\n                    out_channels=self.n_anchors * self.num_classes,\n                    kernel_size=1,\n                    stride=1,\n                    padding=0,\n                )\n            )\n            self.reg_preds.append(\n                nn.Conv2d(\n                    in_channels=int(256 * width),\n                    
out_channels=4,\n                    kernel_size=1,\n                    stride=1,\n                    padding=0,\n                )\n            )\n            self.obj_preds.append(\n                nn.Conv2d(\n                    in_channels=int(256 * width),\n                    out_channels=self.n_anchors * 1,\n                    kernel_size=1,\n                    stride=1,\n                    padding=0,\n                )\n            )\n\n        self.use_l1 = False\n        self.l1_loss = nn.L1Loss(reduction=\"none\")\n        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction=\"none\")\n        self.iou_loss = IOUloss(reduction=\"none\")\n        self.strides = strides\n        self.grids = [torch.zeros(1)] * len(in_channels)\n        self.expanded_strides = [None] * len(in_channels)\n\n    def initialize_biases(self, prior_prob):\n        for conv in self.cls_preds:\n            b = conv.bias.view(self.n_anchors, -1)\n            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n        for conv in self.obj_preds:\n            b = conv.bias.view(self.n_anchors, -1)\n            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))\n            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n    def forward(self, xin, labels=None, imgs=None):\n        outputs = []\n        origin_preds = []\n        x_shifts = []\n        y_shifts = []\n        expanded_strides = []\n\n        for k, (cls_conv, reg_conv, stride_this_level, x) in enumerate(\n            zip(self.cls_convs, self.reg_convs, self.strides, xin)\n        ):\n            x = self.stems[k](x)\n            cls_x = x\n            reg_x = x\n\n            cls_feat = cls_conv(cls_x)\n            cls_output = self.cls_preds[k](cls_feat)\n\n            reg_feat = reg_conv(reg_x)\n            reg_output = self.reg_preds[k](reg_feat)\n            obj_output = self.obj_preds[k](reg_feat)\n\n            if self.training:\n                output = torch.cat([reg_output, obj_output, cls_output], 1)\n                output, grid = self.get_output_and_grid(\n                    output, k, stride_this_level, xin[0].type()\n                )\n                x_shifts.append(grid[:, :, 0])\n                y_shifts.append(grid[:, :, 1])\n                expanded_strides.append(\n                    torch.zeros(1, grid.shape[1])\n                    .fill_(stride_this_level)\n                    .type_as(xin[0])\n                )\n                if self.use_l1:\n                    batch_size = reg_output.shape[0]\n                    hsize, wsize = reg_output.shape[-2:]\n                    reg_output = reg_output.view(\n                        batch_size, self.n_anchors, 4, hsize, wsize\n                    )\n                    reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(\n                        batch_size, -1, 4\n                    )\n                    origin_preds.append(reg_output.clone())\n\n            else:\n                output = torch.cat(\n                    [reg_output, obj_output.sigmoid(), cls_output.sigmoid()], 1\n                )\n\n            outputs.append(output)\n\n        if self.training:\n            return self.get_losses(\n                imgs,\n                x_shifts,\n                y_shifts,\n                expanded_strides,\n                labels,\n                torch.cat(outputs, 1),\n                origin_preds,\n                dtype=xin[0].dtype,\n            )\n      
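  # inference: flatten and concatenate per-level outputs; optionally decode boxes\n      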
  else:\n            self.hw = [x.shape[-2:] for x in outputs]\n            # [batch, n_anchors_all, 85]\n            outputs = torch.cat(\n                [x.flatten(start_dim=2) for x in outputs], dim=2\n            ).permute(0, 2, 1)\n            if self.decode_in_inference:\n                return self.decode_outputs(outputs, dtype=xin[0].type())\n            else:\n                return outputs\n\n    def get_output_and_grid(self, output, k, stride, dtype):\n        grid = self.grids[k]\n\n        batch_size = output.shape[0]\n        n_ch = 5 + self.num_classes\n        hsize, wsize = output.shape[-2:]\n        if grid.shape[2:4] != output.shape[2:4]:\n            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])\n            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize, 2).type(dtype)\n            self.grids[k] = grid\n\n        output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)\n        output = output.permute(0, 1, 3, 4, 2).reshape(\n            batch_size, self.n_anchors * hsize * wsize, -1\n        )\n        grid = grid.view(1, -1, 2)\n        output[..., :2] = (output[..., :2] + grid) * stride\n        output[..., 2:4] = torch.exp(output[..., 2:4]) * stride\n        return output, grid\n\n    def decode_outputs(self, outputs, dtype):\n        grids = []\n        strides = []\n        for (hsize, wsize), stride in zip(self.hw, self.strides):\n            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])\n            grid = torch.stack((xv, yv), 2).view(1, -1, 2)\n            grids.append(grid)\n            shape = grid.shape[:2]\n            strides.append(torch.full((*shape, 1), stride))\n\n        grids = torch.cat(grids, dim=1).type(dtype)\n        strides = torch.cat(strides, dim=1).type(dtype)\n\n        outputs[..., :2] = (outputs[..., :2] + grids) * strides\n        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides\n        return outputs\n\n    def get_losses(\n        self,\n        imgs,\n        x_shifts,\n        y_shifts,\n        expanded_strides,\n        labels,\n        outputs,\n        origin_preds,\n        dtype,\n    ):\n        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]\n        obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]\n        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]\n\n        # calculate targets\n        mixup = labels.shape[2] > 5\n        if mixup:\n            label_cut = labels[..., :5]\n        else:\n            label_cut = labels\n        nlabel = (label_cut.sum(dim=2) > 0).sum(dim=1)  # number of objects\n\n        total_num_anchors = outputs.shape[1]\n        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]\n        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]\n        expanded_strides = torch.cat(expanded_strides, 1)\n        if self.use_l1:\n            origin_preds = torch.cat(origin_preds, 1)\n\n        cls_targets = []\n        reg_targets = []\n        l1_targets = []\n        obj_targets = []\n        fg_masks = []\n\n        num_fg = 0.0\n        num_gts = 0.0\n\n        for batch_idx in range(outputs.shape[0]):\n            num_gt = int(nlabel[batch_idx])\n            num_gts += num_gt\n            if num_gt == 0:\n                cls_target = outputs.new_zeros((0, self.num_classes))\n                reg_target = outputs.new_zeros((0, 4))\n                l1_target = outputs.new_zeros((0, 4))\n                obj_target = outputs.new_zeros((total_num_anchors, 1))\n           
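     # this image has no ground-truth objects: every anchor is background\n           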
     fg_mask = outputs.new_zeros(total_num_anchors).bool()\n            else:\n                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]\n                gt_classes = labels[batch_idx, :num_gt, 0]\n                bboxes_preds_per_image = bbox_preds[batch_idx]\n                \n                try:\n                    (\n                        gt_matched_classes,\n                        fg_mask,\n                        pred_ious_this_matching,\n                        matched_gt_inds,\n                        num_fg_img,\n                    ) = self.get_assignments(  # noqa\n                        batch_idx,\n                        num_gt,\n                        total_num_anchors,\n                        gt_bboxes_per_image,\n                        gt_classes,\n                        bboxes_preds_per_image,\n                        expanded_strides,\n                        x_shifts,\n                        y_shifts,\n                        cls_preds,\n                        bbox_preds,\n                        obj_preds,\n                        labels,\n                        imgs,\n                    )\n                except RuntimeError:\n                    logger.info(\n                        \"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n                           CPU mode is applied in this batch. If you want to avoid this issue, \\\n                           try to reduce the batch size or image size.\"\n                    )\n                    print(\"OOM RuntimeError is raised due to the huge memory cost during label assignment. \\\n                           CPU mode is applied in this batch. If you want to avoid this issue, \\\n                           try to reduce the batch size or image size.\")\n                    torch.cuda.empty_cache()\n                    (\n                        gt_matched_classes,\n                        fg_mask,\n                        pred_ious_this_matching,\n                        matched_gt_inds,\n                        num_fg_img,\n                    ) = self.get_assignments(  # noqa\n                        batch_idx,\n                        num_gt,\n                        total_num_anchors,\n                        gt_bboxes_per_image,\n                        gt_classes,\n                        bboxes_preds_per_image,\n                        expanded_strides,\n                        x_shifts,\n                        y_shifts,\n                        cls_preds,\n                        bbox_preds,\n                        obj_preds,\n                        labels,\n                        imgs,\n                        \"cpu\",\n                    )\n                \n                \n                torch.cuda.empty_cache()\n                num_fg += num_fg_img\n\n                cls_target = F.one_hot(\n                    gt_matched_classes.to(torch.int64), self.num_classes\n                ) * pred_ious_this_matching.unsqueeze(-1)\n                obj_target = fg_mask.unsqueeze(-1)\n                reg_target = gt_bboxes_per_image[matched_gt_inds]\n\n                if self.use_l1:\n                    l1_target = self.get_l1_target(\n                        outputs.new_zeros((num_fg_img, 4)),\n                        gt_bboxes_per_image[matched_gt_inds],\n                        expanded_strides[0][fg_mask],\n                        x_shifts=x_shifts[0][fg_mask],\n                        y_shifts=y_shifts[0][fg_mask],\n                    )\n\n  
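          # collect this image's targets into the batch-level lists\n  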
          cls_targets.append(cls_target)\n            reg_targets.append(reg_target)\n            obj_targets.append(obj_target.to(dtype))\n            fg_masks.append(fg_mask)\n            if self.use_l1:\n                l1_targets.append(l1_target)\n\n        cls_targets = torch.cat(cls_targets, 0)\n        reg_targets = torch.cat(reg_targets, 0)\n        obj_targets = torch.cat(obj_targets, 0)\n        fg_masks = torch.cat(fg_masks, 0)\n        if self.use_l1:\n            l1_targets = torch.cat(l1_targets, 0)\n\n        num_fg = max(num_fg, 1)\n        loss_iou = (\n            self.iou_loss(bbox_preds.view(-1, 4)[fg_masks], reg_targets)\n        ).sum() / num_fg\n        loss_obj = (\n            self.bcewithlog_loss(obj_preds.view(-1, 1), obj_targets)\n        ).sum() / num_fg\n        loss_cls = (\n            self.bcewithlog_loss(\n                cls_preds.view(-1, self.num_classes)[fg_masks], cls_targets\n            )\n        ).sum() / num_fg\n        if self.use_l1:\n            loss_l1 = (\n                self.l1_loss(origin_preds.view(-1, 4)[fg_masks], l1_targets)\n            ).sum() / num_fg\n        else:\n            loss_l1 = 0.0\n\n        reg_weight = 5.0\n        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1\n\n        return (\n            loss,\n            reg_weight * loss_iou,\n            loss_obj,\n            loss_cls,\n            loss_l1,\n            num_fg / max(num_gts, 1),\n        )\n\n    def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps=1e-8):\n        l1_target[:, 0] = gt[:, 0] / stride - x_shifts\n        l1_target[:, 1] = gt[:, 1] / stride - y_shifts\n        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)\n        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)\n        return l1_target\n\n    @torch.no_grad()\n    def get_assignments(\n        self,\n        batch_idx,\n        num_gt,\n        total_num_anchors,\n        gt_bboxes_per_image,\n        gt_classes,\n        bboxes_preds_per_image,\n        expanded_strides,\n        x_shifts,\n        y_shifts,\n        cls_preds,\n        bbox_preds,\n        obj_preds,\n        labels,\n        imgs,\n        mode=\"gpu\",\n    ):\n\n        if mode == \"cpu\":\n            print(\"------------CPU Mode for This Batch-------------\")\n            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()\n            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()\n            gt_classes = gt_classes.cpu().float()\n            expanded_strides = expanded_strides.cpu().float()\n            x_shifts = x_shifts.cpu()\n            y_shifts = y_shifts.cpu()\n\n        img_size = imgs.shape[2:]\n        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(\n            gt_bboxes_per_image,\n            expanded_strides,\n            x_shifts,\n            y_shifts,\n            total_num_anchors,\n            num_gt,\n            img_size\n        )\n\n        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]\n        cls_preds_ = cls_preds[batch_idx][fg_mask]\n        obj_preds_ = obj_preds[batch_idx][fg_mask]\n        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]\n\n        if mode == \"cpu\":\n            gt_bboxes_per_image = gt_bboxes_per_image.cpu()\n            bboxes_preds_per_image = bboxes_preds_per_image.cpu()\n\n        pair_wise_ious = bboxes_iou(gt_bboxes_per_image, bboxes_preds_per_image, False)\n\n        gt_cls_per_image = (\n            F.one_hot(gt_classes.to(torch.int64), self.num_classes)\n            
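# one-hot encode GT classes and repeat them for each candidate anchor\n            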
.float()\n            .unsqueeze(1)\n            .repeat(1, num_in_boxes_anchor, 1)\n        )\n        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)\n\n        if mode == \"cpu\":\n            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()\n\n        with torch.cuda.amp.autocast(enabled=False):\n            cls_preds_ = (\n                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n                * obj_preds_.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n            )\n            pair_wise_cls_loss = F.binary_cross_entropy(\n                cls_preds_.sqrt_(), gt_cls_per_image, reduction=\"none\"\n            ).sum(-1)\n        del cls_preds_\n\n        cost = (\n            pair_wise_cls_loss\n            + 3.0 * pair_wise_ious_loss\n            + 100000.0 * (~is_in_boxes_and_center)\n        )\n\n        (\n            num_fg,\n            gt_matched_classes,\n            pred_ious_this_matching,\n            matched_gt_inds,\n        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt, fg_mask)\n        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss\n\n        if mode == \"cpu\":\n            gt_matched_classes = gt_matched_classes.cuda()\n            fg_mask = fg_mask.cuda()\n            pred_ious_this_matching = pred_ious_this_matching.cuda()\n            matched_gt_inds = matched_gt_inds.cuda()\n\n        return (\n            gt_matched_classes,\n            fg_mask,\n            pred_ious_this_matching,\n            matched_gt_inds,\n            num_fg,\n        )\n\n    def get_in_boxes_info(\n        self,\n        gt_bboxes_per_image,\n        expanded_strides,\n        x_shifts,\n        y_shifts,\n        total_num_anchors,\n        num_gt,\n        img_size\n    ):\n        expanded_strides_per_image = expanded_strides[0]\n        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image\n        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image\n        x_centers_per_image = (\n            (x_shifts_per_image + 0.5 * expanded_strides_per_image)\n            .unsqueeze(0)\n            .repeat(num_gt, 1)\n        )  # [n_anchor] -> [n_gt, n_anchor]\n        y_centers_per_image = (\n            (y_shifts_per_image + 0.5 * expanded_strides_per_image)\n            .unsqueeze(0)\n            .repeat(num_gt, 1)\n        )\n\n        gt_bboxes_per_image_l = (\n            (gt_bboxes_per_image[:, 0] - 0.5 * gt_bboxes_per_image[:, 2])\n            .unsqueeze(1)\n            .repeat(1, total_num_anchors)\n        )\n        gt_bboxes_per_image_r = (\n            (gt_bboxes_per_image[:, 0] + 0.5 * gt_bboxes_per_image[:, 2])\n            .unsqueeze(1)\n            .repeat(1, total_num_anchors)\n        )\n        gt_bboxes_per_image_t = (\n            (gt_bboxes_per_image[:, 1] - 0.5 * gt_bboxes_per_image[:, 3])\n            .unsqueeze(1)\n            .repeat(1, total_num_anchors)\n        )\n        gt_bboxes_per_image_b = (\n            (gt_bboxes_per_image[:, 1] + 0.5 * gt_bboxes_per_image[:, 3])\n            .unsqueeze(1)\n            .repeat(1, total_num_anchors)\n        )\n\n        b_l = x_centers_per_image - gt_bboxes_per_image_l\n        b_r = gt_bboxes_per_image_r - x_centers_per_image\n        b_t = y_centers_per_image - gt_bboxes_per_image_t\n        b_b = gt_bboxes_per_image_b - y_centers_per_image\n        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)\n\n        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0\n        is_in_boxes_all = is_in_boxes.sum(dim=0) > 
0\n        # in fixed center\n\n        center_radius = 2.5\n        # clip center inside image\n        gt_bboxes_per_image_clip = gt_bboxes_per_image[:, 0:2].clone()\n        gt_bboxes_per_image_clip[:, 0] = torch.clamp(gt_bboxes_per_image_clip[:, 0], min=0, max=img_size[1])\n        gt_bboxes_per_image_clip[:, 1] = torch.clamp(gt_bboxes_per_image_clip[:, 1], min=0, max=img_size[0])\n\n        gt_bboxes_per_image_l = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat(\n            1, total_num_anchors\n        ) - center_radius * expanded_strides_per_image.unsqueeze(0)\n        gt_bboxes_per_image_r = (gt_bboxes_per_image_clip[:, 0]).unsqueeze(1).repeat(\n            1, total_num_anchors\n        ) + center_radius * expanded_strides_per_image.unsqueeze(0)\n        gt_bboxes_per_image_t = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat(\n            1, total_num_anchors\n        ) - center_radius * expanded_strides_per_image.unsqueeze(0)\n        gt_bboxes_per_image_b = (gt_bboxes_per_image_clip[:, 1]).unsqueeze(1).repeat(\n            1, total_num_anchors\n        ) + center_radius * expanded_strides_per_image.unsqueeze(0)\n\n        c_l = x_centers_per_image - gt_bboxes_per_image_l\n        c_r = gt_bboxes_per_image_r - x_centers_per_image\n        c_t = y_centers_per_image - gt_bboxes_per_image_t\n        c_b = gt_bboxes_per_image_b - y_centers_per_image\n        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)\n        is_in_centers = center_deltas.min(dim=-1).values > 0.0\n        is_in_centers_all = is_in_centers.sum(dim=0) > 0\n\n        # in boxes and in centers\n        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all\n\n        is_in_boxes_and_center = (\n            is_in_boxes[:, is_in_boxes_anchor] & is_in_centers[:, is_in_boxes_anchor]\n        )\n        del gt_bboxes_per_image_clip\n        return is_in_boxes_anchor, is_in_boxes_and_center\n\n    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt, fg_mask):\n        # Dynamic K\n        # ---------------------------------------------------------------\n        matching_matrix = torch.zeros_like(cost)\n\n        ious_in_boxes_matrix = pair_wise_ious\n        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))\n        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)\n        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(\n                cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False\n            )\n            matching_matrix[gt_idx][pos_idx] = 1.0\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        anchor_matching_gt = matching_matrix.sum(0)\n        if (anchor_matching_gt > 1).sum() > 0:\n            cost_min, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)\n            matching_matrix[:, anchor_matching_gt > 1] *= 0.0\n            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0\n        fg_mask_inboxes = matching_matrix.sum(0) > 0.0\n        num_fg = fg_mask_inboxes.sum().item()\n\n        fg_mask[fg_mask.clone()] = fg_mask_inboxes\n\n        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n        gt_matched_classes = gt_classes[matched_gt_inds]\n\n        pred_ious_this_matching = (matching_matrix * pair_wise_ious).sum(0)[\n            fg_mask_inboxes\n        ]\n        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds\n"
  },
  {
    "path": "yolox/models/yolo_pafpn.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\n\nfrom .darknet import CSPDarknet\nfrom .network_blocks import BaseConv, CSPLayer, DWConv\n\n\nclass YOLOPAFPN(nn.Module):\n    \"\"\"\n    YOLOv3 model. Darknet 53 is the default backbone of this model.\n    \"\"\"\n\n    def __init__(\n        self,\n        depth=1.0,\n        width=1.0,\n        in_features=(\"dark3\", \"dark4\", \"dark5\"),\n        in_channels=[256, 512, 1024],\n        depthwise=False,\n        act=\"silu\",\n    ):\n        super().__init__()\n        self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)\n        self.in_features = in_features\n        self.in_channels = in_channels\n        Conv = DWConv if depthwise else BaseConv\n\n        self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n        self.lateral_conv0 = BaseConv(\n            int(in_channels[2] * width), int(in_channels[1] * width), 1, 1, act=act\n        )\n        self.C3_p4 = CSPLayer(\n            int(2 * in_channels[1] * width),\n            int(in_channels[1] * width),\n            round(3 * depth),\n            False,\n            depthwise=depthwise,\n            act=act,\n        )  # cat\n\n        self.reduce_conv1 = BaseConv(\n            int(in_channels[1] * width), int(in_channels[0] * width), 1, 1, act=act\n        )\n        self.C3_p3 = CSPLayer(\n            int(2 * in_channels[0] * width),\n            int(in_channels[0] * width),\n            round(3 * depth),\n            False,\n            depthwise=depthwise,\n            act=act,\n        )\n\n        # bottom-up conv\n        self.bu_conv2 = Conv(\n            int(in_channels[0] * width), int(in_channels[0] * width), 3, 2, act=act\n        )\n        self.C3_n3 = CSPLayer(\n            int(2 * in_channels[0] * width),\n            int(in_channels[1] * width),\n            round(3 * depth),\n            False,\n            depthwise=depthwise,\n            act=act,\n        )\n\n        # bottom-up conv\n        self.bu_conv1 = Conv(\n            int(in_channels[1] * width), int(in_channels[1] * width), 3, 2, act=act\n        )\n        self.C3_n4 = CSPLayer(\n            int(2 * in_channels[1] * width),\n            int(in_channels[2] * width),\n            round(3 * depth),\n            False,\n            depthwise=depthwise,\n            act=act,\n        )\n\n\n    def forward(self, input):\n        \"\"\"\n        Args:\n            inputs: input images.\n\n        Returns:\n            Tuple[Tensor]: FPN feature.\n        \"\"\"\n\n        #  backbone\n        out_features = self.backbone(input)\n        features = [out_features[f] for f in self.in_features]\n        [x2, x1, x0] = features\n\n        fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32\n        f_out0 = self.upsample(fpn_out0)  # 512/16\n        f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16\n        f_out0 = self.C3_p4(f_out0)  # 1024->512/16\n\n        fpn_out1 = self.reduce_conv1(f_out0)  # 512->256/16\n        f_out1 = self.upsample(fpn_out1)  # 256/8\n        f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8\n        pan_out2 = self.C3_p3(f_out1)  # 512->256/8\n\n        p_out1 = self.bu_conv2(pan_out2)  # 256->256/16\n        p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16\n        pan_out1 = self.C3_n3(p_out1)  # 512->512/16\n\n        p_out0 = self.bu_conv1(pan_out1)  # 512->512/32\n        p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 
512->1024/32\n        pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32\n\n        outputs = (pan_out2, pan_out1, pan_out0)\n        return outputs\n"
  },
  {
    "path": "yolox/models/yolox.py",
    "content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch.nn as nn\n\nfrom .yolo_head import YOLOXHead\nfrom .yolo_pafpn import YOLOPAFPN\n\nclass YOLOX(nn.Module):\n    \"\"\"\n    YOLOX model module. The module list is defined by create_yolov3_modules function.\n    The network returns loss values from three YOLO layers during training\n    and detection results during test.\n    \"\"\"\n\n    def __init__(self, backbone=None, head=None):\n        super().__init__()\n        if backbone is None:\n            backbone = YOLOPAFPN()\n        if head is None:\n            head = YOLOXHead(80)\n\n        self.backbone = backbone\n        self.head = head\n\n    def forward(self, x, targets=None):\n        # fpn output content features of [dark3, dark4, dark5]\n        fpn_outs = self.backbone(x)\n\n        if self.training:\n            assert targets is not None\n            loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(\n                fpn_outs, targets, x\n            )\n            outputs = {\n                \"total_loss\": loss,\n                \"iou_loss\": iou_loss,\n                \"l1_loss\": l1_loss,\n                \"conf_loss\": conf_loss,\n                \"cls_loss\": cls_loss,\n                \"num_fg\": num_fg,\n            }\n        else:\n            outputs = self.head(fpn_outs)\n\n        return outputs\n"
  },
  {
    "path": "yolox/tracker/basetrack.py",
    "content": "import numpy as np\nfrom collections import OrderedDict\n\n\nclass TrackState(object):\n    New = 0\n    Tracked = 1\n    Lost = 2\n    Removed = 3\n\n\nclass BaseTrack(object):\n    _count = 0\n\n    track_id = 0\n    is_activated = False\n    state = TrackState.New\n\n    history = OrderedDict()\n    features = []\n    curr_feature = None\n    score = 0\n    start_frame = 0\n    frame_id = 0\n    time_since_update = 0\n\n    # multi-camera\n    location = (np.inf, np.inf)\n\n    @property\n    def end_frame(self):\n        return self.frame_id\n\n    @staticmethod\n    def next_id():\n        BaseTrack._count += 1\n        return BaseTrack._count\n\n    def activate(self, *args):\n        raise NotImplementedError\n\n    def predict(self):\n        raise NotImplementedError\n\n    def update(self, *args, **kwargs):\n        raise NotImplementedError\n\n    def mark_lost(self):\n        self.state = TrackState.Lost\n\n    def mark_removed(self):\n        self.state = TrackState.Removed\n"
  },
  {
    "path": "yolox/tracker/diffusion_tracker.py",
    "content": "import numpy as np\nfrom collections import deque\n\nimport torch\nimport torch.nn.functional as F \nimport torchvision\nfrom copy import deepcopy\nfrom yolox.tracker import matching\nfrom detectron2.structures import Boxes\nfrom yolox.utils.box_ops import box_xyxy_to_cxcywh\nfrom yolox.utils.boxes import xyxy2cxcywh\nfrom torchvision.ops import box_iou,nms\nfrom yolox.utils.cluster_nms import cluster_nms\n\n\n\nclass DiffusionTracker(object):\n    def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms_thresh_3d=0.7,nms_thresh_2d=0.75,interval=5):\n\n        self.frame_id = 0\n        self.backbone=model.backbone\n        self.feature_projs=model.projs\n        self.diffusion_model=model.head\n        self.feature_extractor=self.diffusion_model.head.box_pooler\n        self.det_thresh = det_thresh\n        self.association_thresh = conf_thresh\n        self.low_det_thresh = 0.1\n        self.low_association_thresh = 0.2\n        self.nms_thresh_2d=nms_thresh_2d\n        self.nms_thresh_3d=nms_thresh_3d\n        self.same_thresh=0.9\n        self.pre_features=None\n        self.data_type=tensor_type\n\n        self.re_association_features=None\n        self.re_association_interval=interval\n        # [tracklet_id,T,6] (x,y,x,y,score,t)\n        self.tracklet_db=None\n        self.total_time=0\n        self.dynamic_time=True\n        self.repeat_times=8\n        self.sampling_steps=1\n        self.num_boxes=1000\n\n        self.track_t=40\n        self.re_association_t=40\n        self.mot17=False\n\n    def update(self,cur_image):\n        self.frame_id += 1\n        cur_features,mate_info=self.extract_feature(cur_image=cur_image)\n        mate_shape,mate_device,mate_dtype=mate_info\n        self.diffusion_model.device=mate_device\n        self.diffusion_model.dtype=mate_dtype\n        b,_,h,w=mate_shape\n        images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4)\n        if self.frame_id==1:\n            if self.pre_features is None:\n                self.pre_features=cur_features\n            inps=self.prepare_input(self.pre_features,cur_features)\n            diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes,\n                                                                                                dynamic_time=self.dynamic_time,track_candidate=self.repeat_times)\n            self.total_time+=association_time\n            _,_,detections=self.diffusion_postprocess(diffusion_outputs,conf_scores,conf_thre=self.association_thresh,nms_thre=self.nms_thresh_3d)\n            detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d)\n            self.tracklet_db=np.zeros((len(detections),1,6))\n            self.tracklet_db[:,-1,:4]=detections[:,:4]\n            self.tracklet_db[:,-1,4]=detections[:,5]\n            self.tracklet_db[:,-1,5]=self.frame_id\n        else:\n            ref_bboxes,ref_track_ids=self.get_targets_from_tracklet_db()\n            inps=self.prepare_input(self.pre_features,cur_features)\n            bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bboxes))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1)\n            # ref_num_proposals=self.proposal_schedule(len(ref_bboxes))\n            # ref_sampling_steps=self.sampling_steps_schedule(len(ref_bboxes))\n            
track_tracklet_db=np.concatenate([np.zeros((len(self.tracklet_db),1,5)),deepcopy(self.tracklet_db[:,-1,5]).reshape(-1,1,1)],axis=2)\n            diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes,\n                                                                                                ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.track_t)\n            self.total_time+=association_time\n            diffusion_ref_detections,diffusion_track_detections,detections=self.diffusion_postprocess(diffusion_outputs,\n                                                                                                      conf_scores,\n                                                                                                      conf_thre=self.low_association_thresh,\n                                                                                                      nms_thre=self.nms_thresh_3d)\n            high_track_inds=diffusion_ref_detections[:,4]>self.association_thresh\n            diffusion_ref_detections,diffusion_track_detections=diffusion_ref_detections[high_track_inds],diffusion_track_detections[high_track_inds]\n            \n            detections=self.diffusion_det_filt(detections,conf_thre=self.low_det_thresh,nms_thre=self.nms_thresh_2d)\n            diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections,\n                                                                                          diffusion_track_detections,\n                                                                                          conf_thre=self.low_det_thresh,\n                                                                                          nms_thre=self.nms_thresh_2d)\n            \n            pred_track_ids,pred_bboxes,pred_scores=self.diffusion_matching(ref_bboxes,ref_track_ids,\n                                                                           diffusion_ref_detections,\n                                                                           diffusion_track_detections)\n            \n            high_det_inds=detections[:,5]>self.det_thresh\n\n            if pred_bboxes is None:\n                new_detections=detections\n                new_detections_inds=high_det_inds\n            else:\n                dists = matching.iou_distance(pred_bboxes, detections[:,:4])\n                if self.mot17:\n                    dists=matching.fuse_score(dists,detections[:,5])\n                matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh)\n                new_detections=detections[u_detection]\n                new_detections_inds=high_det_inds[u_detection]\n                if len(matches)>0:\n                    pred_bboxes[matches[:,0]]=detections[matches[:,1],:4]\n            if ref_track_ids is not None and pred_track_ids is not None:\n                matching_index=np.argwhere(np.array(ref_track_ids).reshape(-1,1)==pred_track_ids.reshape(1,-1))\n                track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,:4]=pred_bboxes[matching_index[:,1]]\n                track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,4]=pred_scores[matching_index[:,1]]\n                track_tracklet_db[ref_track_ids[matching_index[:,0]],-1,5]=self.frame_id\n                # self.track_t=400\n                
self.track_t=self.extract_mean_track_t(self.tracklet_db[ref_track_ids[matching_index[:,0]],-1,:4],pred_bboxes[matching_index[:,1]])\n                # print(self.track_t)\n            self.tracklet_db=np.concatenate([self.tracklet_db,track_tracklet_db],axis=1)\n            # yolox init new tracks\n            if len(new_detections[new_detections_inds])>0:\n                new_detections=new_detections[new_detections_inds]\n                pred_bboxes,pred_scores=new_detections[:,:4],new_detections[:,5]\n                new_tracklet_db=np.zeros((len(new_detections),self.frame_id,6))\n                new_tracklet_db[:,-1,:4]=pred_bboxes\n                new_tracklet_db[:,-1,4]=pred_scores\n                new_tracklet_db[:,-1,5]=self.frame_id\n                self.tracklet_db=np.concatenate([self.tracklet_db,new_tracklet_db],axis=0)\n                \n        self.pre_features=cur_features\n        if (self.frame_id-1)%self.re_association_interval==0:\n            if self.frame_id!=1:\n                # reassociation\n                inps=self.prepare_input(self.re_association_features,cur_features)\n                # images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4)\n\n                ref_mask=self.tracklet_db[:,-1-self.re_association_interval,:5].sum(-1)>0\n                ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1-self.re_association_interval,:4])\n                ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask]\n\n                \n                cur_mask=self.tracklet_db[:,-1,:5].sum(-1)>0\n                cur_bbox=deepcopy(self.tracklet_db[cur_mask,-1,:4])\n                cur_track_ids=np.arange(len(self.tracklet_db))[cur_mask]\n\n                mix_mask=np.logical_and(ref_mask,cur_mask)\n                if sum(mix_mask)>0:\n                    # self.re_association_t=400\n                    self.re_association_t=self.extract_mean_track_t(self.tracklet_db[mix_mask,-1-self.re_association_interval,:4],self.tracklet_db[mix_mask,-1,:4])\n\n                bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bbox))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1)\n\n                diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes,\n                                                                                                    ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.re_association_t)\n                # self.total_time+=association_time\n                diffusion_ref_detections,diffusion_track_detections,_=self.diffusion_postprocess(diffusion_outputs,\n                                                                                                 conf_scores,\n                                                                                                 conf_thre=self.association_thresh,\n                                                                                                 nms_thre=self.nms_thresh_3d)\n                \n                diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections,\n                                                                                diffusion_track_detections,\n                                                                                conf_thre=self.det_thresh,\n                                                                                
nms_thre=self.nms_thresh_2d)\n\n                pred_track_ids,pred_bboxes,pred_scores=self.diffusion_matching(ref_bbox,ref_track_ids,\n                                                                diffusion_ref_detections,\n                                                                diffusion_track_detections)\n                if pred_bboxes is not None:\n                    dists = matching.iou_distance(pred_bboxes,cur_bbox)\n                    matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh)\n                    if len(matches)>0:\n                        re_association_mask=pred_track_ids[matches[:,0]]!=cur_track_ids[matches[:,1]]\n                        for pre_track_id,cur_track_id in zip(pred_track_ids[matches[:,0]][re_association_mask],\n                                                            cur_track_ids[matches[:,1]][re_association_mask]):\n                            if self.tracklet_db[cur_track_id,-1-self.re_association_interval,-1]==0 and pre_track_id!=cur_track_id and \\\n                                max(self.tracklet_db[pre_track_id,-1-self.re_association_interval:,-1])<max(self.tracklet_db[cur_track_id,-1-self.re_association_interval:,-1]):\n                                self.tracklet_db[pre_track_id]=np.where(self.tracklet_db[pre_track_id]>self.tracklet_db[cur_track_id],self.tracklet_db[pre_track_id],self.tracklet_db[cur_track_id])\n            self.re_association_features=cur_features\n\n    def get_results(self):\n        results=[]\n        overall_obj_ids=np.arange(len(self.tracklet_db))\n        for t in range(len(self.tracklet_db[0])):\n            activated_mask=self.tracklet_db[:,t,:5].sum(-1)>0\n            obj_info=self.tracklet_db[activated_mask,t,:]\n            obj_track_ids=overall_obj_ids[activated_mask]\n            results.append((obj_track_ids,obj_info))\n        return results\n    \n    def extract_feature(self,cur_image):\n        fpn_outs=self.backbone(cur_image)\n        cur_features=[]\n        for proj,l_feat in zip(self.feature_projs,fpn_outs):\n            cur_features.append(proj(l_feat))\n        mate_info=(cur_image.shape,cur_image.device,cur_image.dtype)\n        return cur_features,mate_info\n\n    def extract_mean_track_t(self,pre_box,cur_box):\n        # \"xyxy\"\n        pre_box=xyxy2cxcywh(pre_box)\n        cur_box=xyxy2cxcywh(cur_box)\n        abs_box=np.abs(pre_box-cur_box)\n        abs_percent=np.sum(abs_box/(pre_box+1e-5),axis=1)/4\n        track_t=np.mean(abs_percent)\n        return min(max(int(track_t*1000),1),999)\n\n    \n    def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=0.7,conf_thre=0.6):\n\n        pre_prediction,cur_prediction=diffusion_outputs.split(len(diffusion_outputs)//2,dim=0)\n\n        output = [None for _ in range(len(pre_prediction))]\n        for i,(pre_image_pred,cur_image_pred,association_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)):\n\n            association_score=association_score.flatten()\n            # If none are remaining => process next image\n            if not pre_image_pred.size(0):\n                continue\n            # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000)\n            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n            detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device)\n            detections[0,:,:4]=pre_image_pred[:,:4]\n            
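# row 0 holds reference-frame boxes and row 1 current-frame boxes; both share the\n            # association score (column 4) and get a fused confidence in column 5\n            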
detections[1,:,:4]=cur_image_pred[:,:4]\n            detections[0,:,4]=association_score\n            detections[1,:,4]=association_score\n            detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*association_score)\n            detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*association_score)\n\n            score_out_index=association_score>conf_thre\n\n            # strategy=torch.mean\n            # value=strategy(detections[:,:,5],dim=0,keepdim=False)\n            # score_out_index=value>conf_thre\n\n            detections=detections[:,score_out_index,:]\n\n            if not detections.size(1):\n                output[i]=detections\n                continue\n\n            nms_out_index_3d = cluster_nms(\n                                        detections[0,:,:4],\n                                        detections[1,:,:4],\n                                        # value[score_out_index],\n                                        detections[0,:,4],\n                                        iou_threshold=nms_thre)\n\n            detections = detections[:,nms_out_index_3d,:]\n            if output[i] is None:\n                output[i] = detections\n            else:\n                output[i] = torch.cat((output[i], detections))\n\n        return output[0][0],output[0][1],torch.cat([output[1][0],output[1][1]],dim=0) if len(output)>=2 else None\n\n    def diffusion_track_filt(self,ref_detections,track_detections,conf_thre=0.6,nms_thre=0.7):\n\n        if not ref_detections.size(1):\n            return ref_detections.cpu().numpy(),track_detections.cpu().numpy()\n        \n        scores=ref_detections[:,5]\n        score_out_index=scores>conf_thre\n        ref_detections=ref_detections[score_out_index]\n        track_detections=track_detections[score_out_index]\n        nms_out_index = torchvision.ops.batched_nms(\n                ref_detections[:, :4],\n                ref_detections[:, 5],\n                ref_detections[:, 6],\n                nms_thre,\n            )\n        return ref_detections[nms_out_index].cpu().numpy(),track_detections[nms_out_index].cpu().numpy()\n\n    def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thre=0.7):\n\n        if not diffusion_detections.size(1):\n            return diffusion_detections.cpu().numpy()\n\n        scores=diffusion_detections[:,5]\n        score_out_index=scores>conf_thre\n        diffusion_detections=diffusion_detections[score_out_index]\n        nms_out_index = torchvision.ops.batched_nms(\n                diffusion_detections[:, :4],\n                diffusion_detections[:, 5],\n                diffusion_detections[:, 6],\n                nms_thre,\n            )\n        return diffusion_detections[nms_out_index].cpu().numpy()\n    \n    def diffusion_matching(self,ref_bboxes,ref_track_ids,diffusion_pre_track_outputs,diffusion_cur_track_outputs):\n        ref_bboxes=np.array(ref_bboxes)\n        dists=matching.iou_distance(ref_bboxes,diffusion_pre_track_outputs[:,:4])\n        matches,u_track, u_detection = matching.linear_assignment(dists, thresh=self.same_thresh)\n        if len(matches)>0:\n            ref_track_ids=np.array(ref_track_ids)[matches[:,0]]\n            return ref_track_ids,diffusion_cur_track_outputs[matches[:,1],:4],diffusion_cur_track_outputs[matches[:,1],5]\n        else:\n            return None,None,None\n    \n    def proposal_schedule(self,num_ref_bboxes):\n        # simple strategy\n        return 16*num_ref_bboxes\n    \n    def 
sampling_steps_schedule(self,num_ref_bboxes):\n        min_sampling_steps=1\n        max_sampling_steps=4\n        min_num_bboxes=10\n        max_num_bboxes=100\n        ref_sampling_steps=(num_ref_bboxes-min_num_bboxes)*(max_sampling_steps-min_sampling_steps)/(max_num_bboxes-min_num_bboxes)+min_sampling_steps\n\n        return min(max(int(ref_sampling_steps),min_sampling_steps),max_sampling_steps)\n\n    def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=0.75,sorted=False,descending=False):\n\n        box_pred_per_image, scores_per_image=detections[:,:4],detections[:,4]*detections[:,5]\n        score_track_indices=torch.argsort((track_ids+scores_per_image),descending=True)\n        track_ids=track_ids[score_track_indices]\n        scores_per_image=scores_per_image[score_track_indices]\n        box_pred_per_image=box_pred_per_image[score_track_indices]\n\n        assert len(track_ids)==box_pred_per_image.shape[0]\n\n        # vote guarantee only one track id in track candidates\n        keep_mask = torch.zeros_like(scores_per_image, dtype=torch.bool)\n        for class_id in torch.unique(track_ids):\n            curr_indices = torch.where(track_ids == class_id)[0]\n            curr_keep_indices = nms(box_pred_per_image[curr_indices],scores_per_image[curr_indices],vote_iou_thres)\n            candidate_iou_indices=box_iou(box_pred_per_image[curr_indices],box_pred_per_image[curr_indices])>vote_iou_thres\n            counter=[]\n            for cluster_indice in candidate_iou_indices[curr_keep_indices]:\n                cluster_scores=scores_per_image[curr_indices][cluster_indice]\n                counter.append(len(cluster_scores)+torch.mean(cluster_scores))\n            max_indice=torch.argmax(torch.tensor(counter).type(self.data_type))\n            keep_mask[curr_indices[curr_keep_indices][max_indice]] = True\n        \n        keep_indices = torch.where(keep_mask)[0]        \n        track_ids=track_ids[keep_indices]\n        box_pred_per_image=box_pred_per_image[keep_indices]\n        scores_per_image=scores_per_image[keep_indices]\n\n        if sorted and not descending:\n            descending_indices=torch.argsort(track_ids)\n            track_ids=track_ids[descending_indices]\n            box_pred_per_image=box_pred_per_image[descending_indices]\n            scores_per_image=scores_per_image[descending_indices]\n\n        return track_ids.cpu().numpy(),box_pred_per_image.cpu().numpy(),scores_per_image.cpu().numpy()\n\n    def prepare_input(self,pre_features,cur_features):\n        inps_pre_features=[]\n        inps_cur_Features=[]\n        for l_pre_feat,l_cur_feat in zip(pre_features,cur_features):\n            inps_pre_features.append(torch.cat([l_pre_feat.clone(),l_cur_feat.clone()],dim=0))\n            inps_cur_Features.append(torch.cat([l_cur_feat.clone(),l_cur_feat.clone()],dim=0))\n        return (inps_pre_features,inps_cur_Features)\n\n    def get_targets_from_tracklet_db(self):\n        ref_mask=self.tracklet_db[:,-1,:5].sum(-1)>0\n        ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1,:4])\n        ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask]\n        return ref_bbox,ref_track_ids\n\n\ndef joint_stracks(tlista, tlistb):\n    exists = {}\n    res = []\n    for t in tlista:\n        exists[t.track_id] = 1\n        res.append(t)\n    for t in tlistb:\n        tid = t.track_id\n        if not exists.get(tid, 0):\n            exists[tid] = 1\n            res.append(t)\n    return res\n\n\ndef sub_stracks(tlista, tlistb):\n    stracks = {}\n    for 
t in tlista:\n        stracks[t.track_id] = t\n    for t in tlistb:\n        tid = t.track_id\n        if stracks.get(tid, 0):\n            del stracks[tid]\n    return list(stracks.values())\n\n\n"
  },
  {
    "path": "yolox/tracker/diffusion_tracker_kl.py",
    "content": "import numpy as np\nfrom collections import deque\nimport time\nimport torch\nimport torch.nn.functional as F \nimport torchvision\nfrom copy import deepcopy\nfrom yolox.tracker import matching\nfrom detectron2.structures import Boxes\nfrom yolox.utils.box_ops import box_xyxy_to_cxcywh\nfrom yolox.utils.boxes import xyxy2cxcywh\nfrom torchvision.ops import box_iou,nms\nfrom yolox.utils.cluster_nms import cluster_nms\n\nfrom .kalman_filter import KalmanFilter\nfrom yolox.tracker import matching\nfrom .basetrack import BaseTrack, TrackState\n\nclass STrack(BaseTrack):\n    shared_kalman = KalmanFilter()\n    def __init__(self, tlwh, score):\n\n        # wait activate\n        self._tlwh = np.asarray(tlwh, dtype=np.float)\n        self.kalman_filter = None\n        self.mean, self.covariance = None, None\n        self.is_activated = False\n\n        self.score = score\n        self.tracklet_len = 0\n\n    def predict(self):\n        mean_state = self.mean.copy()\n        if self.state != TrackState.Tracked:\n            mean_state[7] = 0\n        self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)\n\n    @staticmethod\n    def multi_predict(stracks):\n        if len(stracks) > 0:\n            multi_mean = np.asarray([st.mean.copy() for st in stracks])\n            multi_covariance = np.asarray([st.covariance for st in stracks])\n            for i, st in enumerate(stracks):\n                if st.state != TrackState.Tracked:\n                    multi_mean[i][7] = 0\n            multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)\n            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):\n                stracks[i].mean = mean\n                stracks[i].covariance = cov\n\n    def activate(self, kalman_filter, frame_id):\n        \"\"\"Start a new tracklet\"\"\"\n        self.kalman_filter = kalman_filter\n        self.track_id = self.next_id()\n        self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))\n\n        self.tracklet_len = 0\n        self.state = TrackState.Tracked\n        if frame_id == 1:\n            self.is_activated = True\n        # self.is_activated = True\n        self.frame_id = frame_id\n        self.start_frame = frame_id\n\n    def re_activate(self, new_track, frame_id, new_id=False):\n        self.mean, self.covariance = self.kalman_filter.update(\n            self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)\n        )\n        self._tlwh=new_track.tlwh\n        self.tracklet_len = 0\n        self.state = TrackState.Tracked\n        self.is_activated = True\n        self.frame_id = frame_id\n        if new_id:\n            self.track_id = self.next_id()\n        self.score = new_track.score\n\n    def update(self, new_track, frame_id):\n        \"\"\"\n        Update a matched track\n        :type new_track: STrack\n        :type frame_id: int\n        :type update_feature: bool\n        :return:\n        \"\"\"\n        self.frame_id = frame_id\n        self.tracklet_len += 1\n\n        new_tlwh = new_track.tlwh\n        self._tlwh=new_tlwh\n        self.mean, self.covariance = self.kalman_filter.update(\n            self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))\n        self.state = TrackState.Tracked\n        self.is_activated = True\n\n        self.score = new_track.score\n\n    @property\n    # @jit(nopython=True)\n    def tlwh(self):\n        \"\"\"Get current position in bounding box 
format `(top left x, top left y,\n                width, height)`.\n        \"\"\"\n        if self.mean is None:\n            return self._tlwh.copy()\n        ret = self.mean[:4].copy()\n        ret[2] *= ret[3]\n        ret[:2] -= ret[2:] / 2\n        return ret\n\n    @property\n    # @jit(nopython=True)\n    def tlbr(self):\n        \"\"\"Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,\n        `(top left, bottom right)`.\n        \"\"\"\n        ret = self.tlwh.copy()\n        ret[2:] += ret[:2]\n        return ret\n\n    @staticmethod\n    # @jit(nopython=True)\n    def tlwh_to_xyah(tlwh):\n        \"\"\"Convert bounding box to format `(center x, center y, aspect ratio,\n        height)`, where the aspect ratio is `width / height`.\n        \"\"\"\n        ret = np.asarray(tlwh).copy()\n        ret[:2] += ret[2:] / 2\n        ret[2] /= ret[3]\n        return ret\n\n    def to_xyah(self):\n        return self.tlwh_to_xyah(self.tlwh)\n\n    @staticmethod\n    # @jit(nopython=True)\n    def tlbr_to_tlwh(tlbr):\n        ret = np.asarray(tlbr).copy()\n        ret[2:] -= ret[:2]\n        return ret\n\n    @staticmethod\n    # @jit(nopython=True)\n    def tlwh_to_tlbr(tlwh):\n        ret = np.asarray(tlwh).copy()\n        ret[2:] += ret[:2]\n        return ret\n\n    def __repr__(self):\n        return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)\n\nclass DiffusionTracker(object):\n    def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms_thresh_3d=0.7,nms_thresh_2d=0.75,interval=5,detections=None):\n\n        self.frame_id = 0\n        # BaseTrack._count=-1\n        self.backbone=model.backbone\n        self.feature_projs=model.projs\n        self.diffusion_model=model.head\n        self.feature_extractor=self.diffusion_model.head.box_pooler\n        self.det_thresh = det_thresh\n        self.association_thresh = conf_thresh\n        # self.low_det_thresh = 0.1\n        # self.low_association_thresh = 0.2\n        self.nms_thresh_2d=nms_thresh_2d\n        self.nms_thresh_3d=nms_thresh_3d\n        self.same_thresh=0.9\n        self.pre_features=None\n        self.data_type=tensor_type\n        self.detections=detections\n\n        self.tracked_stracks = []  # type: list[STrack]\n        self.lost_stracks = []  # type: list[STrack]\n        self.removed_stracks = []  # type: list[STrack]\n        self.max_time_lost = 30\n        self.kalman_filter = KalmanFilter()\n\n        self.repeat_times=0\n        self.dynamic_time=True\n        \n        self.sampling_steps=1\n        self.num_boxes=500\n\n        self.track_t=400\n        self.mot17=False\n\n    def update(self,cur_image):\n        self.frame_id += 1\n        activated_starcks = []\n        refind_stracks = []\n        lost_stracks = []\n        removed_stracks = []\n        cur_features,mate_info=self.extract_feature(cur_image=cur_image)\n        mate_shape,mate_device,mate_dtype=mate_info\n        self.diffusion_model.device=mate_device\n        self.diffusion_model.dtype=mate_dtype\n        b,_,h,w=mate_shape\n        images_whwh=torch.tensor([w, h, w, h], dtype=mate_dtype, device=mate_device)[None,:].expand(4*b,4)\n        if self.frame_id==1:\n            if self.pre_features is None:\n                self.pre_features=cur_features\n            inps=self.prepare_input(self.pre_features,cur_features)\n            
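# frame 1: one DDIM sampling pass over (identical) previous/current features; the paired\n            # outputs act as plain detections that seed the initial tracklets below\n            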
diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes,\n                                                                                                dynamic_time=self.dynamic_time,track_candidate=self.repeat_times)\n            _,_,detections=self.diffusion_postprocess(diffusion_outputs,conf_scores,conf_thre=self.association_thresh,nms_thre=self.nms_thresh_3d)\n            detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d)\n            # detections=np.array(self.detections[self.frame_id])\n            # detections=detections[detections[:,5]>self.det_thresh]\n            for det in detections:\n                track=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5])\n                track.activate(self.kalman_filter, self.frame_id)\n                self.tracked_stracks.append(track)\n            output_stracks = [track for track in self.tracked_stracks if track.is_activated]\n            return output_stracks,association_time\n        else:\n            ref_bboxes=[STrack.tlwh_to_tlbr(track._tlwh) for track in self.tracked_stracks]\n            inps=self.prepare_input(self.pre_features,cur_features)\n            if len(ref_bboxes)>0:\n                bboxes=box_xyxy_to_cxcywh(torch.tensor(np.array(ref_bboxes))).type(self.data_type).reshape(1,-1,4).repeat(2,1,1)\n            else:\n                bboxes=None\n            # ref_num_proposals=self.proposal_schedule(len(ref_bboxes))\n            # ref_sampling_steps=self.sampling_steps_schedule(len(ref_bboxes))\n            diffusion_outputs,conf_scores,association_time=self.diffusion_model.new_ddim_sample(inps,images_whwh,num_timesteps=self.sampling_steps,num_proposals=self.num_boxes,\n                                                                                                ref_targets=bboxes,dynamic_time=self.dynamic_time,track_candidate=self.repeat_times,diffusion_t=self.track_t)\n            diffusion_ref_detections,diffusion_track_detections,detections=self.diffusion_postprocess(diffusion_outputs,\n                                                                                                      conf_scores,\n                                                                                                      conf_thre=self.association_thresh,\n                                                                                                      nms_thre=self.nms_thresh_3d)\n            \n            detections=self.diffusion_det_filt(detections,conf_thre=self.det_thresh,nms_thre=self.nms_thresh_2d)\n            # detections=np.array(self.detections[self.frame_id])\n            # if len(detections)>0:\n            #     detections=detections[detections[:,5]>self.det_thresh]\n            diffusion_ref_detections,diffusion_track_detections=self.diffusion_track_filt(diffusion_ref_detections,\n                                                                                          diffusion_track_detections,\n                                                                                          conf_thre=self.det_thresh,\n                                                                                          nms_thre=self.nms_thresh_2d)\n            start_time=time.time()\n            STrack.multi_predict(self.tracked_stracks)\n            dists = matching.iou_distance(ref_bboxes, diffusion_ref_detections[:,:4])\n            matches, u_track, u_detection = 
matching.linear_assignment(dists, thresh=self.same_thresh)\n\n            if len(matches)>0:\n                # refine matched track boxes with the detector result\n                dists_fix=matching.iou_distance(diffusion_track_detections[matches[:,1],:4],detections[:,:4])\n                matches_fix, u_track_fix, u_detection_fix = matching.linear_assignment(dists_fix, thresh=self.same_thresh)\n                if len(matches_fix)>0:\n                    # index once so the assignment writes into the array instead of a fancy-indexed copy\n                    diffusion_track_detections[matches[:,1][matches_fix[:,0]],:4]=detections[matches_fix[:,1],:4]\n\n                # filter detections already explained by tracked results\n                detections=detections[u_detection_fix]\n\n            ref_box_t=[]\n            track_box_t=[]\n            for itracked, idet in matches:\n                track = self.tracked_stracks[itracked]\n                ref_box_t.append(STrack.tlwh_to_tlbr(track._tlwh))\n                det = diffusion_track_detections[idet]\n                track_box_t.append(det[:4])\n                new_strack=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5])\n                if track.state == TrackState.Tracked:\n                    track.update(new_strack, self.frame_id)\n                    activated_starcks.append(track)\n                else:\n                    track.re_activate(new_strack, self.frame_id, new_id=False)\n                    refind_stracks.append(track)\n            if len(ref_box_t)>0:\n                self.track_t=self.extract_mean_track_t(np.array(ref_box_t),np.array(track_box_t))\n            for it in u_track:\n                track = self.tracked_stracks[it]\n                if not track.state == TrackState.Lost:\n                    track.mark_lost()\n                    lost_stracks.append(track)\n            \n            STrack.multi_predict(self.lost_stracks)\n\n            dists_lost = matching.iou_distance([track.tlbr for track in self.lost_stracks], detections[:,:4])\n            matches_lost, u_track_lost, u_detection_lost = matching.linear_assignment(dists_lost, thresh=self.same_thresh)\n\n            for itracked, idet in matches_lost:\n                track = self.lost_stracks[itracked]\n                det = detections[idet]\n                new_strack=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5])\n                if track.state == TrackState.Tracked:\n                    track.update(new_strack, self.frame_id)\n                    activated_starcks.append(track)\n                else:\n                    track.re_activate(new_strack, self.frame_id, new_id=False)\n                    refind_stracks.append(track)\n            \n\n            for inew in u_detection_lost:\n            # for inew in range(len(detections)):\n                det = detections[inew]\n                track=STrack(STrack.tlbr_to_tlwh(det[:4]), det[5])\n                track.activate(self.kalman_filter, self.frame_id)\n                activated_starcks.append(track)\n\n            for track in self.lost_stracks:\n                if self.frame_id - track.end_frame > self.max_time_lost:\n                    track.mark_removed()\n                    removed_stracks.append(track)\n            \n\n            self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]\n            self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_starcks)\n            self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)\n            self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)\n            self.lost_stracks.extend(lost_stracks)\n  
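          # drop already-removed ids from the lost pool, then resolve tracked/lost duplicates\n  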
          self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)\n            self.removed_stracks.extend(removed_stracks)\n            self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)\n            # get scores of lost tracks\n           \n\n        self.pre_features=cur_features\n        output_stracks = [track for track in self.tracked_stracks]\n        return output_stracks,association_time+time.time()-start_time\n    \n    def extract_feature(self,cur_image):\n        fpn_outs=self.backbone(cur_image)\n        cur_features=[]\n        for proj,l_feat in zip(self.feature_projs,fpn_outs):\n            cur_features.append(proj(l_feat))\n        mate_info=(cur_image.shape,cur_image.device,cur_image.dtype)\n        return cur_features,mate_info\n\n    def extract_mean_track_t(self,pre_box,cur_box):\n        # \"xyxy\"\n        pre_box=xyxy2cxcywh(pre_box)\n        cur_box=xyxy2cxcywh(cur_box)\n        abs_box=np.abs(pre_box-cur_box)\n        abs_percent=np.sum(abs_box/(pre_box+1e-5),axis=1)/4\n        track_t=np.mean(abs_percent)\n        # min(max(int(track_t*1000),1),999)\n        # min(max(int((np.exp(track_t)-1)/(np.exp(0)-1)*1000),1),999)\n        # min(max(int(np.log(track_t+1)/np.log(2)*1000),1),999)\n        return min(max(int(track_t*1000),1),999)\n\n    \n    def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=0.7,conf_thre=0.6):\n\n        pre_prediction,cur_prediction=diffusion_outputs.split(len(diffusion_outputs)//2,dim=0)\n\n        output = [None for _ in range(len(pre_prediction))]\n        for i,(pre_image_pred,cur_image_pred,association_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)):\n\n            association_score=association_score.flatten()\n            # If none are remaining => process next image\n            if not pre_image_pred.size(0):\n                continue\n            # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000)\n            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n            detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device)\n            detections[0,:,:4]=pre_image_pred[:,:4]\n            detections[1,:,:4]=cur_image_pred[:,:4]\n            detections[0,:,4]=association_score\n            detections[1,:,4]=association_score\n            detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*association_score)\n            detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*association_score)\n\n            score_out_index=association_score>conf_thre\n\n            # strategy=torch.mean\n            # value=strategy(detections[:,:,5],dim=0,keepdim=False)\n            # score_out_index=value>conf_thre\n\n            detections=detections[:,score_out_index,:]\n\n            if not detections.size(1):\n                output[i]=detections\n                continue\n\n            nms_out_index_3d = cluster_nms(\n                                        detections[0,:,:4],\n                                        detections[1,:,:4],\n                                        # value[score_out_index],\n                                        detections[0,:,4],\n                                        iou_threshold=nms_thre)\n\n            detections = detections[:,nms_out_index_3d,:]\n            if output[i] is None:\n                output[i] = detections\n            else:\n                output[i] = 
torch.cat((output[i], detections))\n\n        return output[0][0],output[0][1],torch.cat([output[1][0],output[1][1]],dim=0) if len(output)>=2 else None\n\n    def diffusion_track_filt(self,ref_detections,track_detections,conf_thre=0.6,nms_thre=0.7):\n\n        if not ref_detections.size(1):\n            return ref_detections.cpu().numpy(),track_detections.cpu().numpy()\n        \n        scores=ref_detections[:,5]\n        score_out_index=scores>conf_thre\n        ref_detections=ref_detections[score_out_index]\n        track_detections=track_detections[score_out_index]\n        nms_out_index = torchvision.ops.batched_nms(\n                ref_detections[:, :4],\n                ref_detections[:, 5],\n                ref_detections[:, 6],\n                nms_thre,\n            )\n        return ref_detections[nms_out_index].cpu().numpy(),track_detections[nms_out_index].cpu().numpy()\n\n    def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thre=0.7):\n\n        if not diffusion_detections.size(1):\n            return diffusion_detections.cpu().numpy()\n\n        scores=diffusion_detections[:,5]\n        score_out_index=scores>conf_thre\n        diffusion_detections=diffusion_detections[score_out_index]\n        nms_out_index = torchvision.ops.batched_nms(\n                diffusion_detections[:, :4],\n                diffusion_detections[:, 5],\n                diffusion_detections[:, 6],\n                nms_thre,\n            )\n        return diffusion_detections[nms_out_index].cpu().numpy()\n    \n    def proposal_schedule(self,num_ref_bboxes):\n        # simple strategy\n        return 16*num_ref_bboxes\n    \n    def sampling_steps_schedule(self,num_ref_bboxes):\n        min_sampling_steps=1\n        max_sampling_steps=4\n        min_num_bboxes=10\n        max_num_bboxes=100\n        ref_sampling_steps=(num_ref_bboxes-min_num_bboxes)*(max_sampling_steps-min_sampling_steps)/(max_num_bboxes-min_num_bboxes)+min_sampling_steps\n\n        return min(max(int(ref_sampling_steps),min_sampling_steps),max_sampling_steps)\n\n    def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=0.75,sorted=False,descending=False):\n\n        box_pred_per_image, scores_per_image=detections[:,:4],detections[:,4]*detections[:,5]\n        score_track_indices=torch.argsort((track_ids+scores_per_image),descending=True)\n        track_ids=track_ids[score_track_indices]\n        scores_per_image=scores_per_image[score_track_indices]\n        box_pred_per_image=box_pred_per_image[score_track_indices]\n\n        assert len(track_ids)==box_pred_per_image.shape[0]\n\n        # vote guarantee only one track id in track candidates\n        keep_mask = torch.zeros_like(scores_per_image, dtype=torch.bool)\n        for class_id in torch.unique(track_ids):\n            curr_indices = torch.where(track_ids == class_id)[0]\n            curr_keep_indices = nms(box_pred_per_image[curr_indices],scores_per_image[curr_indices],vote_iou_thres)\n            candidate_iou_indices=box_iou(box_pred_per_image[curr_indices],box_pred_per_image[curr_indices])>vote_iou_thres\n            counter=[]\n            for cluster_indice in candidate_iou_indices[curr_keep_indices]:\n                cluster_scores=scores_per_image[curr_indices][cluster_indice]\n                counter.append(len(cluster_scores)+torch.mean(cluster_scores))\n            max_indice=torch.argmax(torch.tensor(counter).type(self.data_type))\n            keep_mask[curr_indices[curr_keep_indices][max_indice]] = True\n        \n        
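# keep exactly one candidate per track id: the NMS-kept box whose IoU cluster has the\n        # best (size + mean score) vote computed above\n        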
keep_indices = torch.where(keep_mask)[0]\n        track_ids=track_ids[keep_indices]\n        box_pred_per_image=box_pred_per_image[keep_indices]\n        scores_per_image=scores_per_image[keep_indices]\n\n        if sorted and not descending:\n            descending_indices=torch.argsort(track_ids)\n            track_ids=track_ids[descending_indices]\n            box_pred_per_image=box_pred_per_image[descending_indices]\n            scores_per_image=scores_per_image[descending_indices]\n\n        return track_ids.cpu().numpy(),box_pred_per_image.cpu().numpy(),scores_per_image.cpu().numpy()\n\n    def prepare_input(self,pre_features,cur_features):\n        inps_pre_features=[]\n        inps_cur_Features=[]\n        for l_pre_feat,l_cur_feat in zip(pre_features,cur_features):\n            inps_pre_features.append(torch.cat([l_pre_feat.clone(),l_cur_feat.clone()],dim=0))\n            inps_cur_Features.append(torch.cat([l_cur_feat.clone(),l_cur_feat.clone()],dim=0))\n        return (inps_pre_features,inps_cur_Features)\n\n    # def get_targets_from_tracklet_db(self):\n    #     ref_mask=self.tracklet_db[:,-1,:5].sum(-1)>0\n    #     ref_bbox=deepcopy(self.tracklet_db[ref_mask,-1,:4])\n    #     ref_track_ids=np.arange(len(self.tracklet_db))[ref_mask]\n    #     return ref_bbox,ref_track_ids\n\n\ndef joint_stracks(tlista, tlistb):\n    exists = {}\n    res = []\n    for t in tlista:\n        exists[t.track_id] = 1\n        res.append(t)\n    for t in tlistb:\n        tid = t.track_id\n        if not exists.get(tid, 0):\n            exists[tid] = 1\n            res.append(t)\n    return res\n\n\ndef sub_stracks(tlista, tlistb):\n    stracks = {}\n    for t in tlista:\n        stracks[t.track_id] = t\n    for t in tlistb:\n        tid = t.track_id\n        if stracks.get(tid, 0):\n            del stracks[tid]\n    return list(stracks.values())\n\nfrom sklearn.metrics.pairwise import cosine_similarity\ndef remove_duplicate_stracks(stracksa, stracksb):\n    pdist = matching.iou_distance(stracksa, stracksb)\n    # if len(stracksa)>0 and len(stracksb)>0:\n    #     # fix a direction bug\n    #     pcosdist=cosine_similarity(\n    #         [track.mean[4:6] for track in stracksa],\n    #         [track.mean[4:6] for track in stracksb])\n    #     pdist=(pdist+pcosdist)/2\n    \n    pairs = np.where(pdist < 0.15)\n    dupa, dupb = list(), list()\n    for p, q in zip(*pairs):\n        timep = stracksa[p].frame_id - stracksa[p].start_frame\n        timeq = stracksb[q].frame_id - stracksb[q].start_frame\n        if stracksa[p].mean is not None and stracksb[q].mean is not None:\n            # compare the velocity directions of the two candidate duplicates\n            x,y=stracksa[p].mean[4:6],stracksb[q].mean[4:6]\n            cosine_dist=1-np.dot(x,y)/(np.linalg.norm(x)*np.linalg.norm(y)+1e-06)\n            if cosine_dist>0.15:\n                continue\n        if timep > timeq:\n            dupb.append(q)\n        else:\n            dupa.append(p)\n    resa = [t for i, t in enumerate(stracksa) if i not in dupa]\n    resb = [t for i, t in enumerate(stracksb) if i not in dupb]\n    return resa, resb\n\n\n"
  },
  {
    "path": "yolox/tracker/kalman_filter.py",
    "content": "# vim: expandtab:ts=4:sw=4\nimport numpy as np\nimport scipy.linalg\n\n\n\"\"\"\nTable for the 0.95 quantile of the chi-square distribution with N degrees of\nfreedom (contains values for N=1, ..., 9). Taken from MATLAB/Octave's chi2inv\nfunction and used as Mahalanobis gating threshold.\n\"\"\"\nchi2inv95 = {\n    1: 3.8415,\n    2: 5.9915,\n    3: 7.8147,\n    4: 9.4877,\n    5: 11.070,\n    6: 12.592,\n    7: 14.067,\n    8: 15.507,\n    9: 16.919}\n\n\nclass KalmanFilter(object):\n    \"\"\"\n    A simple Kalman filter for tracking bounding boxes in image space.\n\n    The 8-dimensional state space\n\n        x, y, a, h, vx, vy, va, vh\n\n    contains the bounding box center position (x, y), aspect ratio a, height h,\n    and their respective velocities.\n\n    Object motion follows a constant velocity model. The bounding box location\n    (x, y, a, h) is taken as direct observation of the state space (linear\n    observation model).\n\n    \"\"\"\n\n    def __init__(self):\n        ndim, dt = 4, 1.\n\n        # Create Kalman filter model matrices.\n        self._motion_mat = np.eye(2 * ndim, 2 * ndim)\n        for i in range(ndim):\n            self._motion_mat[i, ndim + i] = dt\n        self._update_mat = np.eye(ndim, 2 * ndim)\n\n        # Motion and observation uncertainty are chosen relative to the current\n        # state estimate. These weights control the amount of uncertainty in\n        # the model. This is a bit hacky.\n        self._std_weight_position = 1. / 20\n        self._std_weight_velocity = 1. / 160\n\n    def initiate(self, measurement):\n        \"\"\"Create track from unassociated measurement.\n\n        Parameters\n        ----------\n        measurement : ndarray\n            Bounding box coordinates (x, y, a, h) with center position (x, y),\n            aspect ratio a, and height h.\n\n        Returns\n        -------\n        (ndarray, ndarray)\n            Returns the mean vector (8 dimensional) and covariance matrix (8x8\n            dimensional) of the new track. Unobserved velocities are initialized\n            to 0 mean.\n\n        \"\"\"\n        mean_pos = measurement\n        mean_vel = np.zeros_like(mean_pos)\n        mean = np.r_[mean_pos, mean_vel]\n\n        std = [\n            2 * self._std_weight_position * measurement[3],\n            2 * self._std_weight_position * measurement[3],\n            1e-2,\n            2 * self._std_weight_position * measurement[3],\n            10 * self._std_weight_velocity * measurement[3],\n            10 * self._std_weight_velocity * measurement[3],\n            1e-5,\n            10 * self._std_weight_velocity * measurement[3]]\n        covariance = np.diag(np.square(std))\n        return mean, covariance\n\n    def predict(self, mean, covariance):\n        \"\"\"Run Kalman filter prediction step.\n\n        Parameters\n        ----------\n        mean : ndarray\n            The 8 dimensional mean vector of the object state at the previous\n            time step.\n        covariance : ndarray\n            The 8x8 dimensional covariance matrix of the object state at the\n            previous time step.\n\n        Returns\n        -------\n        (ndarray, ndarray)\n            Returns the mean vector and covariance matrix of the predicted\n            state. 
Unobserved velocities are initialized to 0 mean.\n\n        \"\"\"\n        std_pos = [\n            self._std_weight_position * mean[3],\n            self._std_weight_position * mean[3],\n            1e-2,\n            self._std_weight_position * mean[3]]\n        std_vel = [\n            self._std_weight_velocity * mean[3],\n            self._std_weight_velocity * mean[3],\n            1e-5,\n            self._std_weight_velocity * mean[3]]\n        motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))\n\n        #mean = np.dot(self._motion_mat, mean)\n        mean = np.dot(mean, self._motion_mat.T)\n        covariance = np.linalg.multi_dot((\n            self._motion_mat, covariance, self._motion_mat.T)) + motion_cov\n\n        return mean, covariance\n\n    def project(self, mean, covariance):\n        \"\"\"Project state distribution to measurement space.\n\n        Parameters\n        ----------\n        mean : ndarray\n            The state's mean vector (8 dimensional array).\n        covariance : ndarray\n            The state's covariance matrix (8x8 dimensional).\n\n        Returns\n        -------\n        (ndarray, ndarray)\n            Returns the projected mean and covariance matrix of the given state\n            estimate.\n\n        \"\"\"\n        std = [\n            self._std_weight_position * mean[3],\n            self._std_weight_position * mean[3],\n            1e-1,\n            self._std_weight_position * mean[3]]\n        innovation_cov = np.diag(np.square(std))\n\n        mean = np.dot(self._update_mat, mean)\n        covariance = np.linalg.multi_dot((\n            self._update_mat, covariance, self._update_mat.T))\n        return mean, covariance + innovation_cov\n\n    def multi_predict(self, mean, covariance):\n        \"\"\"Run Kalman filter prediction step (Vectorized version).\n        Parameters\n        ----------\n        mean : ndarray\n            The Nx8 dimensional mean matrix of the object states at the previous\n            time step.\n        covariance : ndarray\n            The Nx8x8 dimensional covariance matrics of the object states at the\n            previous time step.\n        Returns\n        -------\n        (ndarray, ndarray)\n            Returns the mean vector and covariance matrix of the predicted\n            state. 
Unobserved velocities are initialized to 0 mean.\n        \"\"\"\n        std_pos = [\n            self._std_weight_position * mean[:, 3],\n            self._std_weight_position * mean[:, 3],\n            1e-2 * np.ones_like(mean[:, 3]),\n            self._std_weight_position * mean[:, 3]]\n        std_vel = [\n            self._std_weight_velocity * mean[:, 3],\n            self._std_weight_velocity * mean[:, 3],\n            1e-5 * np.ones_like(mean[:, 3]),\n            self._std_weight_velocity * mean[:, 3]]\n        sqr = np.square(np.r_[std_pos, std_vel]).T\n\n        motion_cov = []\n        for i in range(len(mean)):\n            motion_cov.append(np.diag(sqr[i]))\n        motion_cov = np.asarray(motion_cov)\n\n        mean = np.dot(mean, self._motion_mat.T)\n        left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))\n        covariance = np.dot(left, self._motion_mat.T) + motion_cov\n\n        return mean, covariance\n\n    def update(self, mean, covariance, measurement):\n        \"\"\"Run Kalman filter correction step.\n\n        Parameters\n        ----------\n        mean : ndarray\n            The predicted state's mean vector (8 dimensional).\n        covariance : ndarray\n            The state's covariance matrix (8x8 dimensional).\n        measurement : ndarray\n            The 4 dimensional measurement vector (x, y, a, h), where (x, y)\n            is the center position, a the aspect ratio, and h the height of the\n            bounding box.\n\n        Returns\n        -------\n        (ndarray, ndarray)\n            Returns the measurement-corrected state distribution.\n\n        \"\"\"\n        projected_mean, projected_cov = self.project(mean, covariance)\n\n        chol_factor, lower = scipy.linalg.cho_factor(\n            projected_cov, lower=True, check_finite=False)\n        kalman_gain = scipy.linalg.cho_solve(\n            (chol_factor, lower), np.dot(covariance, self._update_mat.T).T,\n            check_finite=False).T\n        innovation = measurement - projected_mean\n\n        new_mean = mean + np.dot(innovation, kalman_gain.T)\n        new_covariance = covariance - np.linalg.multi_dot((\n            kalman_gain, projected_cov, kalman_gain.T))\n        return new_mean, new_covariance\n\n    def gating_distance(self, mean, covariance, measurements,\n                        only_position=False, metric='maha'):\n        \"\"\"Compute gating distance between state distribution and measurements.\n        A suitable distance threshold can be obtained from `chi2inv95`. 
If\n        `only_position` is False, the chi-square distribution has 4 degrees of\n        freedom, otherwise 2.\n        Parameters\n        ----------\n        mean : ndarray\n            Mean vector over the state distribution (8 dimensional).\n        covariance : ndarray\n            Covariance of the state distribution (8x8 dimensional).\n        measurements : ndarray\n            An Nx4 dimensional matrix of N measurements, each in\n            format (x, y, a, h) where (x, y) is the bounding box center\n            position, a the aspect ratio, and h the height.\n        only_position : Optional[bool]\n            If True, distance computation is done with respect to the bounding\n            box center position only.\n        Returns\n        -------\n        ndarray\n            Returns an array of length N, where the i-th element contains the\n            squared Mahalanobis distance between (mean, covariance) and\n            `measurements[i]`.\n        \"\"\"\n        mean, covariance = self.project(mean, covariance)\n        if only_position:\n            mean, covariance = mean[:2], covariance[:2, :2]\n            measurements = measurements[:, :2]\n\n        d = measurements - mean\n        if metric == 'gaussian':\n            return np.sum(d * d, axis=1)\n        elif metric == 'maha':\n            cholesky_factor = np.linalg.cholesky(covariance)\n            z = scipy.linalg.solve_triangular(\n                cholesky_factor, d.T, lower=True, check_finite=False,\n                overwrite_b=True)\n            squared_maha = np.sum(z * z, axis=0)\n            return squared_maha\n        else:\n            raise ValueError('invalid distance metric')"
  },
  {
    "path": "yolox/tracker/matching.py",
    "content": "import cv2\nimport numpy as np\nimport scipy\nimport lap\nfrom scipy.spatial.distance import cdist\n\nfrom cython_bbox import bbox_overlaps as bbox_ious\nimport time\n\ndef merge_matches(m1, m2, shape):\n    O,P,Q = shape\n    m1 = np.asarray(m1)\n    m2 = np.asarray(m2)\n\n    M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P))\n    M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q))\n\n    mask = M1*M2\n    match = mask.nonzero()\n    match = list(zip(match[0], match[1]))\n    unmatched_O = tuple(set(range(O)) - set([i for i, j in match]))\n    unmatched_Q = tuple(set(range(Q)) - set([j for i, j in match]))\n\n    return match, unmatched_O, unmatched_Q\n\n\ndef _indices_to_matches(cost_matrix, indices, thresh):\n    matched_cost = cost_matrix[tuple(zip(*indices))]\n    matched_mask = (matched_cost <= thresh)\n\n    matches = indices[matched_mask]\n    unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0]))\n    unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1]))\n\n    return matches, unmatched_a, unmatched_b\n\n\ndef linear_assignment(cost_matrix, thresh):\n    if cost_matrix.size == 0:\n        return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1]))\n    matches, unmatched_a, unmatched_b = [], [], []\n    cost, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh)\n    for ix, mx in enumerate(x):\n        if mx >= 0:\n            matches.append([ix, mx])\n    unmatched_a = np.where(x < 0)[0]\n    unmatched_b = np.where(y < 0)[0]\n    matches = np.asarray(matches)\n    return matches, unmatched_a, unmatched_b\n\n\ndef ious(atlbrs, btlbrs):\n    \"\"\"\n    Compute cost based on IoU\n    :type atlbrs: list[tlbr] | np.ndarray\n    :type atlbrs: list[tlbr] | np.ndarray\n\n    :rtype ious np.ndarray\n    \"\"\"\n    ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float)\n    if ious.size == 0:\n        return ious\n\n    ious = bbox_ious(\n        np.ascontiguousarray(atlbrs, dtype=np.float),\n        np.ascontiguousarray(btlbrs, dtype=np.float)\n    )\n\n    return ious\n\n\ndef iou_distance(atracks, btracks):\n    \"\"\"\n    Compute cost based on IoU\n    :type atracks: list[STrack]\n    :type btracks: list[STrack]\n\n    :rtype cost_matrix np.ndarray\n    \"\"\"\n\n    if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\n        atlbrs = atracks\n        btlbrs = btracks\n    else:\n        atlbrs = [track.tlbr for track in atracks]\n        btlbrs = [track.tlbr for track in btracks]\n    _ious = ious(atlbrs, btlbrs)\n    cost_matrix = 1 - _ious\n\n    return cost_matrix\n\ndef v_iou_distance(atracks, btracks):\n    \"\"\"\n    Compute cost based on IoU\n    :type atracks: list[STrack]\n    :type btracks: list[STrack]\n\n    :rtype cost_matrix np.ndarray\n    \"\"\"\n\n    if (len(atracks)>0 and isinstance(atracks[0], np.ndarray)) or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)):\n        atlbrs = atracks\n        btlbrs = btracks\n    else:\n        atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]\n        btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]\n    _ious = ious(atlbrs, btlbrs)\n    cost_matrix = 1 - _ious\n\n    return cost_matrix\n\ndef embedding_distance(tracks, detections, metric='cosine'):\n    \"\"\"\n    :param tracks: list[STrack]\n    :param detections: 
list[BaseTrack]\n    :param metric:\n    :return: cost_matrix np.ndarray\n    \"\"\"\n\n    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)\n    if cost_matrix.size == 0:\n        return cost_matrix\n    det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float)\n    #for i, track in enumerate(tracks):\n        #cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))\n    track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float)\n    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric))  # Nomalized features\n    return cost_matrix\n\n\n# def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False):\n#     if cost_matrix.size == 0:\n#         return cost_matrix\n#     gating_dim = 2 if only_position else 4\n#     gating_threshold = kalman_filter.chi2inv95[gating_dim]\n#     measurements = np.asarray([det.to_xyah() for det in detections])\n#     for row, track in enumerate(tracks):\n#         gating_distance = kf.gating_distance(\n#             track.mean, track.covariance, measurements, only_position)\n#         cost_matrix[row, gating_distance > gating_threshold] = np.inf\n#     return cost_matrix\n\n\n# def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98):\n#     if cost_matrix.size == 0:\n#         return cost_matrix\n#     gating_dim = 2 if only_position else 4\n#     gating_threshold = kalman_filter.chi2inv95[gating_dim]\n#     measurements = np.asarray([det.to_xyah() for det in detections])\n#     for row, track in enumerate(tracks):\n#         gating_distance = kf.gating_distance(\n#             track.mean, track.covariance, measurements, only_position, metric='maha')\n#         cost_matrix[row, gating_distance > gating_threshold] = np.inf\n#         cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance\n#     return cost_matrix\n\n\ndef fuse_iou(cost_matrix, tracks, detections):\n    if cost_matrix.size == 0:\n        return cost_matrix\n    reid_sim = 1 - cost_matrix\n    iou_dist = iou_distance(tracks, detections)\n    iou_sim = 1 - iou_dist\n    fuse_sim = reid_sim * (1 + iou_sim) / 2\n    det_scores = np.array([det.score for det in detections])\n    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)\n    #fuse_sim = fuse_sim * (1 + det_scores) / 2\n    fuse_cost = 1 - fuse_sim\n    return fuse_cost\n\n\ndef fuse_score(cost_matrix, scores):\n    if cost_matrix.size == 0:\n        return cost_matrix\n    iou_sim = 1 - cost_matrix\n    det_scores = np.array([score for score in scores])\n    det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0)\n    fuse_sim = iou_sim * det_scores\n    fuse_cost = 1 - fuse_sim\n    return fuse_cost"
  },
  {
    "path": "yolox/tracking_utils/evaluation.py",
    "content": "import os\nimport numpy as np\nimport copy\nimport motmetrics as mm\nmm.lap.default_solver = 'lap'\n\nfrom yolox.tracking_utils.io import read_results, unzip_objs\n\n\nclass Evaluator(object):\n\n    def __init__(self, data_root, seq_name, data_type):\n        self.data_root = data_root\n        self.seq_name = seq_name\n        self.data_type = data_type\n\n        self.load_annotations()\n        self.reset_accumulator()\n\n    def load_annotations(self):\n        assert self.data_type == 'mot'\n\n        gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')\n        self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)\n        self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)\n\n    def reset_accumulator(self):\n        self.acc = mm.MOTAccumulator(auto_id=True)\n\n    def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):\n        # results\n        trk_tlwhs = np.copy(trk_tlwhs)\n        trk_ids = np.copy(trk_ids)\n\n        # gts\n        gt_objs = self.gt_frame_dict.get(frame_id, [])\n        gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]\n\n        # ignore boxes\n        ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])\n        ignore_tlwhs = unzip_objs(ignore_objs)[0]\n\n        # remove ignored results\n        keep = np.ones(len(trk_tlwhs), dtype=bool)\n        iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)\n        if len(iou_distance) > 0:\n            match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n            match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n            match_ious = iou_distance[match_is, match_js]\n\n            match_js = np.asarray(match_js, dtype=int)\n            match_js = match_js[np.logical_not(np.isnan(match_ious))]\n            keep[match_js] = False\n            trk_tlwhs = trk_tlwhs[keep]\n            trk_ids = trk_ids[keep]\n        #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)\n        #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])\n        #match_ious = iou_distance[match_is, match_js]\n\n        #match_js = np.asarray(match_js, dtype=int)\n        #match_js = match_js[np.logical_not(np.isnan(match_ious))]\n        #keep[match_js] = False\n        #trk_tlwhs = trk_tlwhs[keep]\n        #trk_ids = trk_ids[keep]\n\n        # get distance matrix\n        iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)\n\n        # acc\n        self.acc.update(gt_ids, trk_ids, iou_distance)\n\n        if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):\n            events = self.acc.last_mot_events  # only supported by https://github.com/longcw/py-motmetrics\n        else:\n            events = None\n        return events\n\n    def eval_file(self, filename):\n        self.reset_accumulator()\n\n        result_frame_dict = read_results(filename, self.data_type, is_gt=False)\n        #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))\n        frames = sorted(list(set(result_frame_dict.keys())))\n        for frame_id in frames:\n            trk_objs = result_frame_dict.get(frame_id, [])\n            trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]\n            self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)\n\n        return self.acc\n\n    @staticmethod\n    def get_summary(accs, names, metrics=('mota', 
'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):\n        names = copy.deepcopy(names)\n        if metrics is None:\n            metrics = mm.metrics.motchallenge_metrics\n        metrics = copy.deepcopy(metrics)\n\n        mh = mm.metrics.create()\n        summary = mh.compute_many(\n            accs,\n            metrics=metrics,\n            names=names,\n            generate_overall=True\n        )\n\n        return summary\n\n    @staticmethod\n    def save_summary(summary, filename):\n        import pandas as pd\n        writer = pd.ExcelWriter(filename)\n        summary.to_excel(writer)\n        writer.save()"
  },
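  {
    "path": "docs/examples/evaluation_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: drives\n# yolox.tracking_utils.evaluation.Evaluator over a few sequences. The data\n# root, sequence names and result-file layout are placeholder assumptions.\nimport os\n\nfrom yolox.tracking_utils.evaluation import Evaluator\n\ndata_root = 'datasets/mot/train'\nseqs = ['MOT17-02-FRCNN', 'MOT17-04-FRCNN']\n\naccs, names = [], []\nfor seq in seqs:\n    evaluator = Evaluator(data_root, seq, data_type='mot')\n    # one MOT-format result file per sequence: frame,id,x,y,w,h,score,...\n    accs.append(evaluator.eval_file(os.path.join('results', seq + '.txt')))\n    names.append(seq)\n\n# pandas DataFrame with mota/idf1/... per sequence plus an OVERALL row\nsummary = Evaluator.get_summary(accs, names)\nprint(summary)\n"
  },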
  {
    "path": "yolox/tracking_utils/io.py",
    "content": "import os\nfrom typing import Dict\nimport numpy as np\n\n\ndef write_results(filename, results_dict: Dict, data_type: str):\n    if not filename:\n        return\n    path = os.path.dirname(filename)\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n    if data_type in ('mot', 'mcmot', 'lab'):\n        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\\n'\n    elif data_type == 'kitti':\n        save_format = '{frame} {id} pedestrian -1 -1 -10 {x1} {y1} {x2} {y2} -1 -1 -1 -1000 -1000 -1000 -10 {score}\\n'\n    else:\n        raise ValueError(data_type)\n\n    with open(filename, 'w') as f:\n        for frame_id, frame_data in results_dict.items():\n            if data_type == 'kitti':\n                frame_id -= 1\n            for tlwh, track_id in frame_data:\n                if track_id < 0:\n                    continue\n                x1, y1, w, h = tlwh\n                x2, y2 = x1 + w, y1 + h\n                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h, score=1.0)\n                f.write(line)\n\n\ndef read_results(filename, data_type: str, is_gt=False, is_ignore=False):\n    if data_type in ('mot', 'lab'):\n        read_fun = read_mot_results\n    else:\n        raise ValueError('Unknown data type: {}'.format(data_type))\n\n    return read_fun(filename, is_gt, is_ignore)\n\n\n\"\"\"\nlabels={'ped', ...\t\t\t% 1\n'person_on_vhcl', ...\t% 2\n'car', ...\t\t\t\t% 3\n'bicycle', ...\t\t\t% 4\n'mbike', ...\t\t\t% 5\n'non_mot_vhcl', ...\t\t% 6\n'static_person', ...\t% 7\n'distractor', ...\t\t% 8\n'occluder', ...\t\t\t% 9\n'occluder_on_grnd', ...\t\t%10\n'occluder_full', ...\t\t% 11\n'reflection', ...\t\t% 12\n'crowd' ...\t\t\t% 13\n};\n\"\"\"\n\n\ndef read_mot_results(filename, is_gt, is_ignore):\n    valid_labels = {1}\n    ignore_labels = {2, 7, 8, 12}\n    results_dict = dict()\n    if os.path.isfile(filename):\n        with open(filename, 'r') as f:\n            for line in f.readlines():\n                linelist = line.split(',')\n                if len(linelist) < 7:\n                    continue\n                fid = int(linelist[0])\n                if fid < 1:\n                    continue\n                results_dict.setdefault(fid, list())\n\n                box_size = float(linelist[4]) * float(linelist[5])\n\n                if is_gt:\n                    if 'MOT16-' in filename or 'MOT17-' in filename:\n                        label = int(float(linelist[7]))\n                        mark = int(float(linelist[6]))\n                        if mark == 0 or label not in valid_labels:\n                            continue\n                    score = 1\n                elif is_ignore:\n                    if 'MOT16-' in filename or 'MOT17-' in filename:\n                        label = int(float(linelist[7]))\n                        vis_ratio = float(linelist[8])\n                        if label not in ignore_labels and vis_ratio >= 0:\n                            continue\n                    else:\n                        continue\n                    score = 1\n                else:\n                    score = float(linelist[6])\n\n                #if box_size > 7000:\n                #if box_size <= 7000 or box_size >= 15000:\n                #if box_size < 15000:\n                    #continue\n\n                tlwh = tuple(map(float, linelist[2:6]))\n                target_id = int(linelist[1])\n\n                results_dict[fid].append((tlwh, target_id, score))\n\n    return 
results_dict\n\n\ndef unzip_objs(objs):\n    if len(objs) > 0:\n        tlwhs, ids, scores = zip(*objs)\n    else:\n        tlwhs, ids, scores = [], [], []\n    tlwhs = np.asarray(tlwhs, dtype=float).reshape(-1, 4)\n\n    return tlwhs, ids, scores"
  },
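  {
    "path": "docs/examples/io_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: round-trips\n# tracking results through yolox.tracking_utils.io. The output path is an\n# assumption.\nfrom yolox.tracking_utils.io import read_results, unzip_objs, write_results\n\n# frame_id -> list of ((x, y, w, h), track_id) pairs, written in 'mot' format\nresults = {\n    1: [((10.0, 20.0, 30.0, 40.0), 1)],\n    2: [((12.0, 22.0, 30.0, 40.0), 1)],\n}\nwrite_results('demo_outputs/demo_results.txt', results, data_type='mot')\n\nframes = read_results('demo_outputs/demo_results.txt', 'mot', is_gt=False)\ntlwhs, ids, scores = unzip_objs(frames[1])\nprint(tlwhs.shape, ids, scores)  # (1, 4) (1,) (1.0,)\n"
  },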
  {
    "path": "yolox/tracking_utils/timer.py",
    "content": "import time\n\n\nclass Timer(object):\n    \"\"\"A simple timer.\"\"\"\n    def __init__(self):\n        self.total_time = 0.\n        self.calls = 0\n        self.start_time = 0.\n        self.diff = 0.\n        self.average_time = 0.\n\n        self.duration = 0.\n\n    def tic(self):\n        # using time.time instead of time.clock because time time.clock\n        # does not normalize for multithreading\n        self.start_time = time.time()\n\n    def toc(self, average=True):\n        self.diff = time.time() - self.start_time\n        self.total_time += self.diff\n        self.calls += 1\n        self.average_time = self.total_time / self.calls\n        if average:\n            self.duration = self.average_time\n        else:\n            self.duration = self.diff\n        return self.duration\n\n    def clear(self):\n        self.total_time = 0.\n        self.calls = 0\n        self.start_time = 0.\n        self.diff = 0.\n        self.average_time = 0.\n        self.duration = 0."
  },
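  {
    "path": "docs/examples/timer_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: measures\n# per-iteration latency with yolox.tracking_utils.timer.Timer.\nimport time\n\nfrom yolox.tracking_utils.timer import Timer\n\ntimer = Timer()\nfor _ in range(5):\n    timer.tic()\n    time.sleep(0.01)  # stand-in for real per-frame work\n    timer.toc()  # average=True keeps a running mean in timer.average_time\n\nprint('average time: {:.4f}s'.format(timer.average_time))\nprint('fps estimate: {:.1f}'.format(1.0 / max(timer.average_time, 1e-9)))\n"
  },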
  {
    "path": "yolox/utils/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .allreduce_norm import *\nfrom .boxes import *\nfrom .checkpoint import load_ckpt, save_checkpoint\nfrom .demo_utils import *\nfrom .dist import *\nfrom .ema import ModelEMA\nfrom .logger import setup_logger\nfrom .lr_scheduler import LRScheduler\nfrom .metric import *\nfrom .model_utils import *\nfrom .setup_env import *\nfrom .visualize import *\n"
  },
  {
    "path": "yolox/utils/allreduce_norm.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nfrom torch import distributed as dist\nfrom torch import nn\n\nimport pickle\nfrom collections import OrderedDict\n\nfrom .dist import _get_global_gloo_group, get_world_size\n\nASYNC_NORM = (\n    nn.BatchNorm1d,\n    nn.BatchNorm2d,\n    nn.BatchNorm3d,\n    nn.InstanceNorm1d,\n    nn.InstanceNorm2d,\n    nn.InstanceNorm3d,\n)\n\n__all__ = [\n    \"get_async_norm_states\",\n    \"pyobj2tensor\",\n    \"tensor2pyobj\",\n    \"all_reduce\",\n    \"all_reduce_norm\",\n]\n\n\ndef get_async_norm_states(module):\n    async_norm_states = OrderedDict()\n    for name, child in module.named_modules():\n        if isinstance(child, ASYNC_NORM):\n            for k, v in child.state_dict().items():\n                async_norm_states[\".\".join([name, k])] = v\n    return async_norm_states\n\n\ndef pyobj2tensor(pyobj, device=\"cuda\"):\n    \"\"\"serialize picklable python object to tensor\"\"\"\n    storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))\n    return torch.ByteTensor(storage).to(device=device)\n\n\ndef tensor2pyobj(tensor):\n    \"\"\"deserialize tensor to picklable python object\"\"\"\n    return pickle.loads(tensor.cpu().numpy().tobytes())\n\n\ndef _get_reduce_op(op_name):\n    return {\n        \"sum\": dist.ReduceOp.SUM,\n        \"mean\": dist.ReduceOp.SUM,\n    }[op_name.lower()]\n\n\ndef all_reduce(py_dict, op=\"sum\", group=None):\n    \"\"\"\n    Apply all reduce function for python dict object.\n    NOTE: make sure that every py_dict has the same keys and values are in the same shape.\n\n    Args:\n        py_dict (dict): dict to apply all reduce op.\n        op (str): operator, could be \"sum\" or \"mean\".\n    \"\"\"\n    world_size = get_world_size()\n    if world_size == 1:\n        return py_dict\n    if group is None:\n        group = _get_global_gloo_group()\n    if dist.get_world_size(group) == 1:\n        return py_dict\n\n    # all reduce logic across different devices.\n    py_key = list(py_dict.keys())\n    py_key_tensor = pyobj2tensor(py_key)\n    dist.broadcast(py_key_tensor, src=0)\n    py_key = tensor2pyobj(py_key_tensor)\n\n    tensor_shapes = [py_dict[k].shape for k in py_key]\n    tensor_numels = [py_dict[k].numel() for k in py_key]\n\n    flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])\n    dist.all_reduce(flatten_tensor, op=_get_reduce_op(op))\n    if op == \"mean\":\n        flatten_tensor /= world_size\n\n    split_tensors = [\n        x.reshape(shape)\n        for x, shape in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes)\n    ]\n    return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})\n\n\ndef all_reduce_norm(module):\n    \"\"\"\n    All reduce norm statistics in different devices.\n    \"\"\"\n    states = get_async_norm_states(module)\n    states = all_reduce(states, op=\"mean\")\n    module.load_state_dict(states, strict=False)\n"
  },
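  {
    "path": "docs/examples/allreduce_norm_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: synchronizes\n# normalization statistics across ranks with yolox.utils.allreduce_norm. With\n# a single process, all_reduce is a no-op, so this also runs undistributed.\nimport torch.nn as nn\n\nfrom yolox.utils.allreduce_norm import all_reduce_norm, get_async_norm_states\n\nmodel = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))\n# state entries collected for syncing, e.g. '1.running_mean', '1.running_var'\nprint(list(get_async_norm_states(model).keys()))\n\n# typically called before evaluation during DDP training\nall_reduce_norm(model)\n"
  },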
  {
    "path": "yolox/utils/box_ops.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nUtilities for bounding box manipulation and GIoU.\n\"\"\"\nimport torch\nfrom torchvision.ops.boxes import box_area\nfrom yolox.utils.cluster_nms import giou_3d\n\ndef box_cxcywh_to_xyxy(x):\n    x_c, y_c, w, h = x.unbind(-1)\n    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n         (x_c + 0.5 * w), (y_c + 0.5 * h)]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n    x0, y0, x1, y1 = x.unbind(-1)\n    b = [(x0 + x1) / 2, (y0 + y1) / 2,\n         (x1 - x0), (y1 - y0)]\n    return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\ndef box_iou(boxes1, boxes2):\n    area1 = box_area(boxes1)\n    area2 = box_area(boxes2)\n\n    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]\n    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]\n\n    wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]\n\n    union = area1[:, None] + area2 - inter\n\n    iou = inter / union\n    return iou, union\n\n\ndef generalized_box_iou(boxes1,boxes2,boxes3,boxes4):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    The boxes should be in [x0, y0, x1, y1] format\n\n    Returns a [N, M] pairwise matrix, where N = len(boxes1)\n    and M = len(boxes2)\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    # boxes1=boxes1.float()\n    # boxes2=boxes2.float()\n    # boxes3=boxes3.float()\n    # boxes4=boxes4.float()\n    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    assert (boxes3[:, 2:] >= boxes3[:, :2]).all()\n    assert (boxes4[:, 2:] >= boxes4[:, :2]).all()\n    # iou1, union1 = box_iou(boxes1, boxes3)\n    # iou2, union2 = box_iou(boxes2, boxes4)\n    # lt = torch.min(boxes1[:, None, :2], boxes3[:, :2])\n    # rb = torch.max(boxes1[:, None, 2:], boxes3[:, 2:])\n\n    # wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    # area1 = wh[:, :, 0] * wh[:, :, 1]\n\n    # lt = torch.min(boxes2[:, None, :2], boxes4[:, :2])\n    # rb = torch.max(boxes2[:, None, 2:], boxes4[:, 2:])\n\n    # wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    # area2 = wh[:, :, 0] * wh[:, :, 1]\n    # uiou=(iou1*union1+iou2*union2)/(union1+union2)\n    # uunion=union1+union2\n    # uarea=area1+area2\n    # return  uiou- (uarea - uunion) / uarea\n\n    return giou_3d(boxes1,boxes3,boxes2,boxes4)\n\n\ndef masks_to_boxes(masks):\n    \"\"\"Compute the bounding boxes around the provided masks\n\n    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n    Returns a [N, 4] tensors, with the boxes in xyxy format\n    \"\"\"\n    if masks.numel() == 0:\n        return torch.zeros((0, 4), device=masks.device)\n\n    h, w = masks.shape[-2:]\n\n    y = torch.arange(0, h, dtype=torch.float)\n    x = torch.arange(0, w, dtype=torch.float)\n    y, x = torch.meshgrid(y, x)\n\n    x_mask = (masks * x.unsqueeze(0))\n    x_max = x_mask.flatten(1).max(-1)[0]\n    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    y_mask = (masks * y.unsqueeze(0))\n    y_max = y_mask.flatten(1).max(-1)[0]\n    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    return torch.stack([x_min, y_min, x_max, y_max], 1)\n\n\n\n# boxes = targets[:, :4].copy()\n# labels = targets[:, 4].copy()\n# ids = targets[:, 5].copy()\n# if len(boxes) == 0:\n#     targets = np.zeros((self.max_labels, 6), 
dtype=np.float32)\n#     image, r_o = preproc(image, input_dim, self.means, self.std)\n#     image = np.ascontiguousarray(image, dtype=np.float32)\n#     return image, targets\n\n# image_o = image.copy()\n# targets_o = targets.copy()\n# height_o, width_o, _ = image_o.shape\n# boxes_o = targets_o[:, :4]\n# labels_o = targets_o[:, 4]\n# ids_o = targets_o[:, 5]\n# # bbox_o: [xyxy] to [c_x,c_y,w,h]\n# boxes_o = xyxy2cxcywh(boxes_o)\n\n# image_t = _distort(image)\n# image_t, boxes_t ,image_r,boxes_r= _mirror(image_t, boxes)\n# height, width, _ = image_t.shape\n# image_t, r_t = preproc(image_t, input_dim, self.means, self.std)\n# image_t, r_r = preproc(image_r, input_dim, self.means, self.std)\n# # boxes [xyxy] 2 [cx,cy,w,h]\n# boxes_t = xyxy2cxcywh(boxes_t)\n# boxes_t *= r_t\n\n# boxes_r = xyxy2cxcywh(boxes_r)\n# boxes_r *= r_r\n\n# mask_b = np.minimum(boxes_t[:, 2], boxes_t[:, 3]) > 1\n# boxes_t = boxes_t[mask_b]\n# boxes_r = boxes_r[mask_b]\n\n# labels_t = labels[mask_b]\n# ids_t = ids[mask_b]\n\n# if len(boxes_t) == 0:\n#     image_t, r_o = preproc(image_o, input_dim, self.means, self.std)\n#     boxes_o *= r_o\n#     boxes_t = boxes_o\n#     image_r=image_t\n#     boxes_r=boxes_t\n#     labels_t = labels_o\n#     ids_t = ids_o\n\n# labels_t = np.expand_dims(labels_t, 1)\n# ids_t = np.expand_dims(ids_t, 1)\n\n# targets_t = np.hstack((labels_t, boxes_t, ids_t))\n# padded_labels = np.zeros((self.max_labels, 6))\n# padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[\n#     : self.max_labels\n# ]\n\n# targets_r = np.hstack((labels_t, boxes_r, ids_t))\n# padded_labels_r = np.zeros((self.max_labels, 6))\n# padded_labels_r[range(len(targets_r))[: self.max_labels]] = targets_r[\n#     : self.max_labels\n# ]\n# padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32)\n# image_t = np.ascontiguousarray(image_t, dtype=np.float32)\n# return image_t, padded_labels\n"
  },
  {
    "path": "yolox/utils/boxes.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport numpy as np\n\nimport torch\nimport torchvision\nimport torch.nn.functional as F\nfrom .cluster_nms import cluster_nms\n\n__all__ = [\n    \"filter_box\",\n    \"postprocess\",\n    \"diffusion_postprocess\",\n    \"bboxes_iou\",\n    \"matrix_iou\",\n    \"adjust_box_anns\",\n    \"xyxy2xywh\",\n    \"xyxy2cxcywh\",\n]\n\n\ndef filter_box(output, scale_range):\n    \"\"\"\n    output: (N, 5+class) shape\n    \"\"\"\n    min_scale, max_scale = scale_range\n    w = output[:, 2] - output[:, 0]\n    h = output[:, 3] - output[:, 1]\n    keep = (w * h > min_scale * min_scale) & (w * h < max_scale * max_scale)\n    return output[keep]\n\n\ndef postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):\n    box_corner = prediction.new(prediction.shape)\n    box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2\n    box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2\n    box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2\n    box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2\n    prediction[:, :, :4] = box_corner[:, :, :4]\n\n    output = [None for _ in range(len(prediction))]\n    for i, image_pred in enumerate(prediction):\n\n        # If none are remaining => process next image\n        if not image_pred.size(0):\n            continue\n        # Get score and class with highest confidence\n        class_conf, class_pred = torch.max(\n            image_pred[:, 5 : 5 + num_classes], 1, keepdim=True\n        )\n\n        conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= conf_thre).squeeze()\n        # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000)\n        # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)\n        detections = torch.cat((image_pred[:, :5], class_conf, class_pred.float()), 1)\n        detections = detections[conf_mask]\n        if not detections.size(0):\n            continue\n\n        nms_out_index = torchvision.ops.batched_nms(\n            detections[:, :4],\n            detections[:, 4] * detections[:, 5],\n            detections[:, 6],\n            nms_thre,\n        )\n        detections = detections[nms_out_index]\n        if output[i] is None:\n            output[i] = detections\n        else:\n            output[i] = torch.cat((output[i], detections))\n\n    return output\n\ndef diffusion_postprocess(pre_prediction,cur_prediction,conf_scores,conf_thre=0.7,det_thre=0.65,nms_thre3d=0.7,nms_thre2d=0.75):\n\n    output = [None for _ in range(len(pre_prediction))]\n    for i,(pre_image_pred,cur_image_pred,asscociate_score) in enumerate(zip(pre_prediction,cur_prediction,conf_scores)):\n\n        asscociate_score=asscociate_score.flatten()\n        # If none are remaining => process next image\n        if not pre_image_pred.size(0):\n            continue\n        # _, conf_mask = torch.topk((image_pred[:, 4] * class_conf.squeeze()), 1000)\n        # Detections ordered as (x1, y1, x2, y2, association_conf, class_conf, class_pred)\n        detections=torch.zeros((2,len(cur_image_pred),7),dtype=cur_image_pred.dtype,device=cur_image_pred.device)\n        detections[0,:,:4]=pre_image_pred[:,:4]\n        detections[1,:,:4]=cur_image_pred[:,:4]\n        detections[0,:,4]=asscociate_score\n        detections[1,:,4]=asscociate_score\n        detections[0,:,5]=torch.sqrt(torch.sigmoid(pre_image_pred[:,4])*asscociate_score)\n   
     detections[1,:,5]=torch.sqrt(torch.sigmoid(cur_image_pred[:,4])*asscociate_score)\n\n        score_out_index=asscociate_score>conf_thre\n        detections=detections[:,score_out_index,:]\n\n        if not detections.size(1):\n            continue\n\n        nms_out_index_3d = cluster_nms(detections[0,:,:4],\n                                    detections[1,:,:4],\n                                    detections[0,:,4],\n                                    iou_threshold=nms_thre3d)\n        detections = detections[:,nms_out_index_3d,:]\n        detections=torch.cat([detections[0],detections[1]],dim=0)\n\n        class_score_out_index=detections[:,5]>det_thre\n\n        detections=detections[class_score_out_index]\n\n        nms_out_index_2d = torchvision.ops.batched_nms(\n                            detections[:,:4],\n                            detections[:,5],\n                            idxs=detections[:,6],\n                            iou_threshold=nms_thre2d)\n        \n        detections = detections[nms_out_index_2d]\n\n        if output[i] is None:\n            output[i] = detections\n        else:\n            output[i] = torch.cat((output[i], detections))\n\n    return output\n\ndef bboxes_iou(bboxes_a, bboxes_b, xyxy=True):\n    if bboxes_a.shape[1] != 4 or bboxes_b.shape[1] != 4:\n        raise IndexError\n\n    if xyxy:\n        tl = torch.max(bboxes_a[:, None, :2], bboxes_b[:, :2])\n        br = torch.min(bboxes_a[:, None, 2:], bboxes_b[:, 2:])\n        area_a = torch.prod(bboxes_a[:, 2:] - bboxes_a[:, :2], 1)\n        area_b = torch.prod(bboxes_b[:, 2:] - bboxes_b[:, :2], 1)\n    else:\n        tl = torch.max(\n            (bboxes_a[:, None, :2] - bboxes_a[:, None, 2:] / 2),\n            (bboxes_b[:, :2] - bboxes_b[:, 2:] / 2),\n        )\n        br = torch.min(\n            (bboxes_a[:, None, :2] + bboxes_a[:, None, 2:] / 2),\n            (bboxes_b[:, :2] + bboxes_b[:, 2:] / 2),\n        )\n\n        area_a = torch.prod(bboxes_a[:, 2:], 1)\n        area_b = torch.prod(bboxes_b[:, 2:], 1)\n    en = (tl < br).type(tl.type()).prod(dim=2)\n    area_i = torch.prod(br - tl, 2) * en  # * ((tl < br).all())\n    return area_i / (area_a[:, None] + area_b - area_i)\n\n\ndef matrix_iou(a, b):\n    \"\"\"\n    return iou of a and b, numpy version for data augenmentation\n    \"\"\"\n    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n    return area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-12)\n\n\ndef adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):\n    #bbox[:, 0::2] = np.clip(bbox[:, 0::2] * scale_ratio + padw, 0, w_max)\n    #bbox[:, 1::2] = np.clip(bbox[:, 1::2] * scale_ratio + padh, 0, h_max)\n    bbox[:, 0::2] = bbox[:, 0::2] * scale_ratio + padw\n    bbox[:, 1::2] = bbox[:, 1::2] * scale_ratio + padh\n    return bbox\n\n\ndef xyxy2xywh(bboxes):\n    bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]\n    bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]\n    return bboxes\n\n\ndef xyxy2cxcywh(bboxes):\n    bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]\n    bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]\n    bboxes[:, 0] = bboxes[:, 0] + bboxes[:, 2] * 0.5\n    bboxes[:, 1] = bboxes[:, 1] + bboxes[:, 3] * 0.5\n    return bboxes\n"
  },
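  {
    "path": "docs/examples/boxes_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: pairwise IoU\n# and coordinate conversion with helpers from yolox.utils.boxes.\nimport torch\n\nfrom yolox.utils.boxes import bboxes_iou, xyxy2cxcywh\n\na = torch.tensor([[0.0, 0.0, 10.0, 10.0]])\nb = torch.tensor([[5.0, 5.0, 15.0, 15.0], [0.0, 0.0, 10.0, 10.0]])\n# intersection 25, union 175 -> ~0.1429 for the first pair, 1.0 for the second\nprint(bboxes_iou(a, b, xyxy=True))\n\n# note: the conversion mutates its argument, hence the clone()\nboxes = torch.tensor([[0.0, 0.0, 10.0, 20.0]])\nprint(xyxy2cxcywh(boxes.clone()))  # -> [[5., 10., 10., 20.]]\n"
  },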
  {
    "path": "yolox/utils/checkpoint.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nfrom loguru import logger\n\nimport torch\n\nimport os\nimport shutil\n\n\ndef load_ckpt(model, ckpt):\n    model_state_dict = model.state_dict()\n    load_dict = {}\n    for key_model, v in model_state_dict.items():\n        if key_model not in ckpt:\n            logger.warning(\n                \"{} is not in the ckpt. Please double check and see if this is desired.\".format(\n                    key_model\n                )\n            )\n            continue\n        v_ckpt = ckpt[key_model]\n        if v.shape != v_ckpt.shape:\n            logger.warning(\n                \"Shape of {} in checkpoint is {}, while shape of {} in model is {}.\".format(\n                    key_model, v_ckpt.shape, key_model, v.shape\n                )\n            )\n            continue\n        load_dict[key_model] = v_ckpt\n\n    model.load_state_dict(load_dict, strict=False)\n    return model\n\n\ndef save_checkpoint(state, is_best, save_dir, model_name=\"\"):\n    if not os.path.exists(save_dir):\n        os.makedirs(save_dir)\n    filename = os.path.join(save_dir, model_name + \"_ckpt.pth.tar\")\n    torch.save(state, filename)\n    if is_best:\n        best_filename = os.path.join(save_dir, \"best_ckpt.pth.tar\")\n        shutil.copyfile(filename, best_filename)\n"
  },
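  {
    "path": "docs/examples/checkpoint_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: saving and\n# restoring weights with yolox.utils.checkpoint. The directory and model\n# names are assumptions.\nimport torch\nimport torch.nn as nn\n\nfrom yolox.utils.checkpoint import load_ckpt, save_checkpoint\n\nmodel = nn.Linear(4, 2)\nstate = {'model': model.state_dict(), 'start_epoch': 1}\nsave_checkpoint(state, is_best=False, save_dir='demo_outputs', model_name='linear')\n\n# load_ckpt warns about (and skips) missing or shape-mismatched keys\nckpt = torch.load('demo_outputs/linear_ckpt.pth.tar')\nmodel = load_ckpt(model, ckpt['model'])\n"
  },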
  {
    "path": "yolox/utils/cluster_nms.py",
    "content": "import torch\n\n@torch.jit.script\ndef intersect(box_a, box_b):\n    \"\"\" We resize both tensors to [A,B,2] without new malloc:\n    [A,2] -> [A,1,2] -> [A,B,2]\n    [B,2] -> [1,B,2] -> [A,B,2]\n    Then we compute the area of intersect between box_a and box_b.\n    Args:\n      box_a: (tensor) bounding boxes, Shape: [n,A,4].\n      box_b: (tensor) bounding boxes, Shape: [n,B,4].\n    Return:\n      (tensor) intersection area, Shape: [n,A,B].\n    \"\"\"\n    n = box_a.size(0)\n    A = box_a.size(1)\n    B = box_b.size(1)\n    max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),\n                       box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))\n    min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),\n                       box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))\n    return torch.clamp(max_xy - min_xy, min=0).prod(3)  # inter\n\n@torch.jit.script\ndef garea(box_a, box_b):\n    \"\"\" We resize both tensors to [A,B,2] without new malloc:\n    [A,2] -> [A,1,2] -> [A,B,2]\n    [B,2] -> [1,B,2] -> [A,B,2]\n    Then we compute the area of intersect between box_a and box_b.\n    Args:\n      box_a: (tensor) bounding boxes, Shape: [n,A,4].\n      box_b: (tensor) bounding boxes, Shape: [n,B,4].\n    Return:\n      (tensor) intersection area, Shape: [n,A,B].\n    \"\"\"\n    n = box_a.size(0)\n    A = box_a.size(1)\n    B = box_b.size(1)\n    max_xy = torch.max(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),\n                       box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))\n    min_xy = torch.min(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),\n                       box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))\n    return torch.clamp(max_xy - min_xy, min=0).prod(3)  # inter\n\n@torch.jit.script\ndef get_box_area(box):\n    return (box[:, :, 2]-box[:, :, 0]) *(box[:, :, 3]-box[:, :, 1])\n\ndef giou_3d(box_a,box_b,box_c,box_d):\n    \"\"\"Compute the jaccard overlap of two sets of boxes.  The jaccard overlap\n    is simply the intersection over union of two boxes.  Here we operate on\n    ground truth boxes and default boxes. 
If iscrowd=True, put the crowd in box_b.\n    E.g.:\n        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n    Args:\n        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n    Return:\n        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n    \"\"\"\n    use_batch = True\n    if box_a.dim() == 2:\n        use_batch = False\n        box_a = box_a[None, ...]\n        box_b = box_b[None, ...]\n        box_c = box_c[None, ...]\n        box_d = box_d[None, ...]\n\n    interab = intersect(box_a,box_b)\n    intercd = intersect(box_c,box_d)\n    \n    area_ab= garea(box_a,box_b)\n    area_cd=garea(box_c,box_d)\n\n    area_a = get_box_area(box_a).unsqueeze(2).expand_as(interab)  # [A,B]\n    area_b = get_box_area(box_b).unsqueeze(1).expand_as(interab)  # [A,B]\n    area_c = get_box_area(box_c).unsqueeze(2).expand_as(intercd)  # [A,B]\n    area_d = get_box_area(box_d).unsqueeze(1).expand_as(intercd)  # [A,B]\n    unionab = area_a + area_b - interab\n    unioncd = area_c+area_d-intercd\n\n    uiouabcd = (interab+intercd) / (unionab+unioncd)\n    out=uiouabcd-(area_ab+area_cd-unionab-unioncd)/(area_ab+area_cd)\n    return out if use_batch else out.squeeze(0)\n\ndef cluster_nms(boxes_a,boxes_c,scores,iou_threshold:float=0.5, top_k:int=500):\n    # Collapse all the classes into 1 \n    _, idx = scores.sort(0, descending=True)\n    idx = idx[:top_k]\n    boxes_a = boxes_a[idx]\n    boxes_b = boxes_a\n    boxes_c = boxes_c[idx]\n    boxes_d = boxes_c\n    iou = giou_3d(boxes_a,boxes_b,boxes_c,boxes_d).triu_(diagonal=1)\n    B = iou\n    for i in range(200):\n        A=B\n        maxA,_=torch.max(A, dim=0)\n        E = (maxA<=iou_threshold).float().unsqueeze(1).expand_as(A)\n        B=iou.mul(E)\n        if A.equal(B)==True:\n            break\n    idx_out = idx[maxA <= iou_threshold]\n    return idx_out\n\n\n\n# ## test\n\n# boxes_a=[[100,100,200,200],\n#          [110,110,210,210],\n#          [50,50,150,150],\n#          [100,100,200,200],\n#          [90,90,190,190],]\n\n# boxes_c=[[100,100,200,200],\n#          [110,110,210,210],\n#          [150,150,250,250],\n#          [0,0,100,100],\n#          [10,10,110,110],]\n\n# scores=[0.91,0.9,0.95,0.9,0.8]\n\n# boxes_a=torch.tensor(boxes_a,dtype=torch.float)\n# boxes_c=torch.tensor(boxes_c,dtype=torch.float)\n# scores=torch.tensor(scores,dtype=torch.float)\n\n\n# indix=cluster_nms(boxes_a,boxes_c,scores)\n# print(indix)\n"
  },
  {
    "path": "yolox/utils/demo_utils.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport numpy as np\n\nimport os\n\n__all__ = [\"mkdir\", \"nms\", \"multiclass_nms\", \"demo_postprocess\"]\n\n\ndef mkdir(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n\ndef nms(boxes, scores, nms_thr):\n    \"\"\"Single class NMS implemented in Numpy.\"\"\"\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n    order = scores.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        inter = w * h\n        ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n        inds = np.where(ovr <= nms_thr)[0]\n        order = order[inds + 1]\n\n    return keep\n\n\ndef multiclass_nms(boxes, scores, nms_thr, score_thr):\n    \"\"\"Multiclass NMS implemented in Numpy\"\"\"\n    final_dets = []\n    num_classes = scores.shape[1]\n    for cls_ind in range(num_classes):\n        cls_scores = scores[:, cls_ind]\n        valid_score_mask = cls_scores > score_thr\n        if valid_score_mask.sum() == 0:\n            continue\n        else:\n            valid_scores = cls_scores[valid_score_mask]\n            valid_boxes = boxes[valid_score_mask]\n            keep = nms(valid_boxes, valid_scores, nms_thr)\n            if len(keep) > 0:\n                cls_inds = np.ones((len(keep), 1)) * cls_ind\n                dets = np.concatenate(\n                    [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1\n                )\n                final_dets.append(dets)\n    if len(final_dets) == 0:\n        return None\n    return np.concatenate(final_dets, 0)\n\n\ndef demo_postprocess(outputs, img_size, p6=False):\n\n    grids = []\n    expanded_strides = []\n\n    if not p6:\n        strides = [8, 16, 32]\n    else:\n        strides = [8, 16, 32, 64]\n\n    hsizes = [img_size[0] // stride for stride in strides]\n    wsizes = [img_size[1] // stride for stride in strides]\n\n    for hsize, wsize, stride in zip(hsizes, wsizes, strides):\n        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))\n        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)\n        grids.append(grid)\n        shape = grid.shape[:2]\n        expanded_strides.append(np.full((*shape, 1), stride))\n\n    grids = np.concatenate(grids, 1)\n    expanded_strides = np.concatenate(expanded_strides, 1)\n    outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides\n    outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides\n\n    return outputs\n"
  },
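  {
    "path": "docs/examples/demo_utils_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: the NumPy NMS\n# helpers from yolox.utils.demo_utils on a tiny set of overlapping boxes.\nimport numpy as np\n\nfrom yolox.utils.demo_utils import multiclass_nms, nms\n\nboxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]], dtype=np.float32)\nscores = np.array([0.9, 0.8, 0.7], dtype=np.float32)\n# box 1 overlaps box 0 above the threshold and is suppressed -> keep [0, 2]\nprint(nms(boxes, scores, nms_thr=0.5))\n\n# multiclass_nms expects an (N, num_classes) score matrix\ncls_scores = scores[:, None]\nprint(multiclass_nms(boxes, cls_scores, nms_thr=0.5, score_thr=0.1))\n"
  },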
  {
    "path": "yolox/utils/dist.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# This file mainly comes from\n# https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py\n# Copyright (c) Facebook, Inc. and its affiliates.\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\"\"\"\nThis file contains primitives for multi-gpu communication.\nThis is useful when doing distributed training.\n\"\"\"\n\nimport numpy as np\n\nimport torch\nfrom torch import distributed as dist\n\nimport functools\nimport logging\nimport pickle\nimport time\n\n__all__ = [\n    \"is_main_process\",\n    \"synchronize\",\n    \"get_world_size\",\n    \"get_rank\",\n    \"get_local_rank\",\n    \"get_local_size\",\n    \"time_synchronized\",\n    \"gather\",\n    \"all_gather\",\n]\n\n_LOCAL_PROCESS_GROUP = None\n\n\ndef synchronize():\n    \"\"\"\n    Helper function to synchronize (barrier) among all processes when using distributed training\n    \"\"\"\n    if not dist.is_available():\n        return\n    if not dist.is_initialized():\n        return\n    world_size = dist.get_world_size()\n    if world_size == 1:\n        return\n    dist.barrier()\n\n\ndef get_world_size() -> int:\n    if not dist.is_available():\n        return 1\n    if not dist.is_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef get_rank() -> int:\n    if not dist.is_available():\n        return 0\n    if not dist.is_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef get_local_rank() -> int:\n    \"\"\"\n    Returns:\n        The rank of the current process within the local (per-machine) process group.\n    \"\"\"\n    if not dist.is_available():\n        return 0\n    if not dist.is_initialized():\n        return 0\n    assert _LOCAL_PROCESS_GROUP is not None\n    return dist.get_rank(group=_LOCAL_PROCESS_GROUP)\n\n\ndef get_local_size() -> int:\n    \"\"\"\n    Returns:\n        The size of the per-machine process group, i.e. 
the number of processes per machine.\n    \"\"\"\n    if not dist.is_available():\n        return 1\n    if not dist.is_initialized():\n        return 1\n    return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)\n\n\ndef is_main_process() -> bool:\n    return get_rank() == 0\n\n\n@functools.lru_cache()\ndef _get_global_gloo_group():\n    \"\"\"\n    Return a process group based on gloo backend, containing all the ranks\n    The result is cached.\n    \"\"\"\n    if dist.get_backend() == \"nccl\":\n        return dist.new_group(backend=\"gloo\")\n    else:\n        return dist.group.WORLD\n\n\ndef _serialize_to_tensor(data, group):\n    backend = dist.get_backend(group)\n    assert backend in [\"gloo\", \"nccl\"]\n    device = torch.device(\"cpu\" if backend == \"gloo\" else \"cuda\")\n\n    buffer = pickle.dumps(data)\n    if len(buffer) > 1024 ** 3:\n        logger = logging.getLogger(__name__)\n        logger.warning(\n            \"Rank {} trying to all-gather {:.2f} GB of data on device {}\".format(\n                get_rank(), len(buffer) / (1024 ** 3), device\n            )\n        )\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to(device=device)\n    return tensor\n\n\ndef _pad_to_largest_tensor(tensor, group):\n    \"\"\"\n    Returns:\n        list[int]: size of the tensor, on each rank\n        Tensor: padded tensor that has the max size\n    \"\"\"\n    world_size = dist.get_world_size(group=group)\n    assert (\n        world_size >= 1\n    ), \"comm.gather/all_gather must be called from ranks within the given group!\"\n    local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)\n    size_list = [\n        torch.zeros([1], dtype=torch.int64, device=tensor.device)\n        for _ in range(world_size)\n    ]\n    dist.all_gather(size_list, local_size, group=group)\n    size_list = [int(size.item()) for size in size_list]\n\n    max_size = max(size_list)\n\n    # we pad the tensor because torch all_gather does not support\n    # gathering tensors of different shapes\n    if local_size != max_size:\n        padding = torch.zeros(\n            (max_size - local_size,), dtype=torch.uint8, device=tensor.device\n        )\n        tensor = torch.cat((tensor, padding), dim=0)\n    return size_list, tensor\n\n\ndef all_gather(data, group=None):\n    \"\"\"\n    Run all_gather on arbitrary picklable data (not necessarily tensors).\n\n    Args:\n        data: any picklable object\n        group: a torch process group. 
By default, will use a group which\n            contains all ranks on gloo backend.\n    Returns:\n        list[data]: list of data gathered from each rank\n    \"\"\"\n    if get_world_size() == 1:\n        return [data]\n    if group is None:\n        group = _get_global_gloo_group()\n    if dist.get_world_size(group) == 1:\n        return [data]\n\n    tensor = _serialize_to_tensor(data, group)\n\n    size_list, tensor = _pad_to_largest_tensor(tensor, group)\n    max_size = max(size_list)\n\n    # receiving Tensor from all ranks\n    tensor_list = [\n        torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)\n        for _ in size_list\n    ]\n    dist.all_gather(tensor_list, tensor, group=group)\n\n    data_list = []\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef gather(data, dst=0, group=None):\n    \"\"\"\n    Run gather on arbitrary picklable data (not necessarily tensors).\n\n    Args:\n        data: any picklable object\n        dst (int): destination rank\n        group: a torch process group. By default, will use a group which\n            contains all ranks on gloo backend.\n\n    Returns:\n        list[data]: on dst, a list of data gathered from each rank. Otherwise,\n            an empty list.\n    \"\"\"\n    if get_world_size() == 1:\n        return [data]\n    if group is None:\n        group = _get_global_gloo_group()\n    if dist.get_world_size(group=group) == 1:\n        return [data]\n    rank = dist.get_rank(group=group)\n\n    tensor = _serialize_to_tensor(data, group)\n    size_list, tensor = _pad_to_largest_tensor(tensor, group)\n\n    # receiving Tensor from all ranks\n    if rank == dst:\n        max_size = max(size_list)\n        tensor_list = [\n            torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)\n            for _ in size_list\n        ]\n        dist.gather(tensor, tensor_list, dst=dst, group=group)\n\n        data_list = []\n        for size, tensor in zip(size_list, tensor_list):\n            buffer = tensor.cpu().numpy().tobytes()[:size]\n            data_list.append(pickle.loads(buffer))\n        return data_list\n    else:\n        dist.gather(tensor, [], dst=dst, group=group)\n        return []\n\n\ndef shared_random_seed():\n    \"\"\"\n    Returns:\n        int: a random number that is the same across all workers.\n            If workers need a shared RNG, they can use this shared seed to\n            create one.\n    All workers must call this function, otherwise it will deadlock.\n    \"\"\"\n    ints = np.random.randint(2 ** 31)\n    all_ints = all_gather(ints)\n    return all_ints[0]\n\n\ndef time_synchronized():\n    \"\"\"pytorch-accurate time\"\"\"\n    if torch.cuda.is_available():\n        torch.cuda.synchronize()\n    return time.time()\n\ndef is_dist_avail_and_initialized():\n    if not dist.is_available():\n        return False\n    if not dist.is_initialized():\n        return False\n    return True\n"
  },
  {
    "path": "yolox/utils/ema.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nimport math\nfrom copy import deepcopy\n\n\ndef is_parallel(model):\n    \"\"\"check if model is in parallel mode.\"\"\"\n\n    parallel_type = (\n        nn.parallel.DataParallel,\n        nn.parallel.DistributedDataParallel,\n    )\n    return isinstance(model, parallel_type)\n\n\ndef copy_attr(a, b, include=(), exclude=()):\n    # Copy attributes from b to a, options to only include [...] and to exclude [...]\n    for k, v in b.__dict__.items():\n        if (len(include) and k not in include) or k.startswith(\"_\") or k in exclude:\n            continue\n        else:\n            setattr(a, k, v)\n\n\nclass ModelEMA:\n    \"\"\"\n    Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n    Keep a moving average of everything in the model state_dict (parameters and buffers).\n    This is intended to allow functionality like\n    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n    A smoothed version of the weights is necessary for some training schemes to perform well.\n    This class is sensitive where it is initialized in the sequence of model init,\n    GPU assignment and distributed training wrappers.\n    \"\"\"\n\n    def __init__(self, model, decay=0.9999, updates=0):\n        \"\"\"\n        Args:\n            model (nn.Module): model to apply EMA.\n            decay (float): ema decay reate.\n            updates (int): counter of EMA updates.\n        \"\"\"\n        # Create EMA(FP32)\n        self.ema = deepcopy(model.module if is_parallel(model) else model).eval()\n        self.updates = updates\n        # decay exponential ramp (to help early epochs)\n        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))\n        for p in self.ema.parameters():\n            p.requires_grad_(False)\n\n    def update(self, model):\n        # Update EMA parameters\n        with torch.no_grad():\n            self.updates += 1\n            d = self.decay(self.updates)\n\n            msd = (\n                model.module.state_dict() if is_parallel(model) else model.state_dict()\n            )  # model state_dict\n            for k, v in self.ema.state_dict().items():\n                if v.dtype.is_floating_point:\n                    v *= d\n                    v += (1.0 - d) * msd[k].detach()\n\n    def update_attr(self, model, include=(), exclude=(\"process_group\", \"reducer\")):\n        # Update EMA attributes\n        copy_attr(self.ema, model, include, exclude)\n"
  },
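  {
    "path": "docs/examples/ema_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: keeping an\n# EMA copy of the weights during training with yolox.utils.ema.ModelEMA.\nimport torch\nimport torch.nn as nn\n\nfrom yolox.utils.ema import ModelEMA\n\nmodel = nn.Linear(4, 2)\nema = ModelEMA(model, decay=0.9998)\noptimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n\nfor _ in range(3):\n    loss = model(torch.randn(8, 4)).pow(2).mean()\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    ema.update(model)  # fold the new weights into the moving average\n\n# evaluate with the smoothed copy rather than the raw model\nsmoothed = ema.ema\n"
  },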
  {
    "path": "yolox/utils/logger.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom loguru import logger\n\nimport inspect\nimport os\nimport sys\n\n\ndef get_caller_name(depth=0):\n    \"\"\"\n    Args:\n        depth (int): Depth of caller conext, use 0 for caller depth. Default value: 0.\n\n    Returns:\n        str: module name of the caller\n    \"\"\"\n    # the following logic is a little bit faster than inspect.stack() logic\n    frame = inspect.currentframe().f_back\n    for _ in range(depth):\n        frame = frame.f_back\n\n    return frame.f_globals[\"__name__\"]\n\n\nclass StreamToLoguru:\n    \"\"\"\n    stream object that redirects writes to a logger instance.\n    \"\"\"\n\n    def __init__(self, level=\"INFO\", caller_names=(\"apex\", \"pycocotools\")):\n        \"\"\"\n        Args:\n            level(str): log level string of loguru. Default value: \"INFO\".\n            caller_names(tuple): caller names of redirected module.\n                Default value: (apex, pycocotools).\n        \"\"\"\n        self.level = level\n        self.linebuf = \"\"\n        self.caller_names = caller_names\n\n    def write(self, buf):\n        full_name = get_caller_name(depth=1)\n        module_name = full_name.rsplit(\".\", maxsplit=-1)[0]\n        if module_name in self.caller_names:\n            for line in buf.rstrip().splitlines():\n                # use caller level log\n                logger.opt(depth=2).log(self.level, line.rstrip())\n        else:\n            sys.__stdout__.write(buf)\n\n    def flush(self):\n        pass\n\n\ndef redirect_sys_output(log_level=\"INFO\"):\n    redirect_logger = StreamToLoguru(log_level)\n    sys.stderr = redirect_logger\n    sys.stdout = redirect_logger\n\n\ndef setup_logger(save_dir, distributed_rank=0, filename=\"log.txt\", mode=\"a\"):\n    \"\"\"setup logger for training and testing.\n    Args:\n        save_dir(str): location to save log file\n        distributed_rank(int): device rank when multi-gpu environment\n        filename (string): log save name.\n        mode(str): log file write mode, `append` or `override`. default is `a`.\n\n    Return:\n        logger instance.\n    \"\"\"\n    loguru_format = (\n        \"<green>{time:YYYY-MM-DD HH:mm:ss}</green> | \"\n        \"<level>{level: <8}</level> | \"\n        \"<cyan>{name}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>\"\n    )\n\n    logger.remove()\n    save_file = os.path.join(save_dir, filename)\n    if mode == \"o\" and os.path.exists(save_file):\n        os.remove(save_file)\n    # only keep logger in rank0 process\n    if distributed_rank == 0:\n        logger.add(\n            sys.stderr,\n            format=loguru_format,\n            level=\"INFO\",\n            enqueue=True,\n        )\n        logger.add(save_file)\n\n    # redirect stdout/stderr to loguru\n    redirect_sys_output(\"INFO\")\n"
  },
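  {
    "path": "docs/examples/logger_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: rank-0\n# console-plus-file logging with yolox.utils.logger.setup_logger. The output\n# directory and file name are assumptions.\nfrom loguru import logger\n\nfrom yolox.utils.logger import setup_logger\n\nsetup_logger('demo_outputs', distributed_rank=0, filename='train_log.txt', mode='a')\nlogger.info('this line goes to stderr and to demo_outputs/train_log.txt')\n"
  },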
  {
    "path": "yolox/utils/lr_scheduler.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport math\nfrom functools import partial\n\n\nclass LRScheduler:\n    def __init__(self, name, lr, iters_per_epoch, total_epochs, **kwargs):\n        \"\"\"\n        Supported lr schedulers: [cos, warmcos, multistep]\n\n        Args:\n            lr (float): learning rate.\n            iters_per_peoch (int): number of iterations in one epoch.\n            total_epochs (int): number of epochs in training.\n            kwargs (dict):\n                - cos: None\n                - warmcos: [warmup_epochs, warmup_lr_start (default 1e-6)]\n                - multistep: [milestones (epochs), gamma (default 0.1)]\n        \"\"\"\n\n        self.lr = lr\n        self.iters_per_epoch = iters_per_epoch\n        self.total_epochs = total_epochs\n        self.total_iters = iters_per_epoch * total_epochs\n\n        self.__dict__.update(kwargs)\n\n        self.lr_func = self._get_lr_func(name)\n\n    def update_lr(self, iters):\n        return self.lr_func(iters)\n\n    def _get_lr_func(self, name):\n        if name == \"cos\":  # cosine lr schedule\n            lr_func = partial(cos_lr, self.lr, self.total_iters)\n        elif name == \"warmcos\":\n            warmup_total_iters = self.iters_per_epoch * self.warmup_epochs\n            warmup_lr_start = getattr(self, \"warmup_lr_start\", 1e-6)\n            lr_func = partial(\n                warm_cos_lr,\n                self.lr,\n                self.total_iters,\n                warmup_total_iters,\n                warmup_lr_start,\n            )\n        elif name == \"yoloxwarmcos\":\n            warmup_total_iters = self.iters_per_epoch * self.warmup_epochs\n            no_aug_iters = self.iters_per_epoch * self.no_aug_epochs\n            warmup_lr_start = getattr(self, \"warmup_lr_start\", 0)\n            min_lr_ratio = getattr(self, \"min_lr_ratio\", 0.2)\n            lr_func = partial(\n                yolox_warm_cos_lr,\n                self.lr,\n                min_lr_ratio,\n                self.total_iters,\n                warmup_total_iters,\n                warmup_lr_start,\n                no_aug_iters,\n            )\n        elif name == \"yoloxsemiwarmcos\":\n            warmup_lr_start = getattr(self, \"warmup_lr_start\", 0)\n            min_lr_ratio = getattr(self, \"min_lr_ratio\", 0.2)\n            warmup_total_iters = self.iters_per_epoch * self.warmup_epochs\n            no_aug_iters = self.iters_per_epoch * self.no_aug_epochs\n            normal_iters = self.iters_per_epoch * self.semi_epoch\n            semi_iters = self.iters_per_epoch_semi * (\n                self.total_epochs - self.semi_epoch - self.no_aug_epochs\n            )\n            lr_func = partial(\n                yolox_semi_warm_cos_lr,\n                self.lr,\n                min_lr_ratio,\n                warmup_lr_start,\n                self.total_iters,\n                normal_iters,\n                no_aug_iters,\n                warmup_total_iters,\n                semi_iters,\n                self.iters_per_epoch,\n                self.iters_per_epoch_semi,\n            )\n        elif name == \"multistep\":  # stepwise lr schedule\n            milestones = [\n                int(self.total_iters * milestone / self.total_epochs)\n                for milestone in self.milestones\n            ]\n            gamma = getattr(self, \"gamma\", 0.1)\n            lr_func = partial(multistep_lr, self.lr, milestones, gamma)\n        
else:\n            raise ValueError(\"Scheduler version {} not supported.\".format(name))\n        return lr_func\n\n\ndef cos_lr(lr, total_iters, iters):\n    \"\"\"Cosine learning rate\"\"\"\n    lr *= 0.5 * (1.0 + math.cos(math.pi * iters / total_iters))\n    return lr\n\n\ndef warm_cos_lr(lr, total_iters, warmup_total_iters, warmup_lr_start, iters):\n    \"\"\"Cosine learning rate with warm up.\"\"\"\n    if iters <= warmup_total_iters:\n        lr = (lr - warmup_lr_start) * iters / float(\n            warmup_total_iters\n        ) + warmup_lr_start\n    else:\n        lr *= 0.5 * (\n            1.0\n            + math.cos(\n                math.pi\n                * (iters - warmup_total_iters)\n                / (total_iters - warmup_total_iters)\n            )\n        )\n    return lr\n\n\ndef yolox_warm_cos_lr(\n    lr,\n    min_lr_ratio,\n    total_iters,\n    warmup_total_iters,\n    warmup_lr_start,\n    no_aug_iter,\n    iters,\n):\n    \"\"\"Cosine learning rate with warm up.\"\"\"\n    min_lr = lr * min_lr_ratio\n    if iters <= warmup_total_iters:\n        # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start\n        lr = (lr - warmup_lr_start) * pow(\n            iters / float(warmup_total_iters), 2\n        ) + warmup_lr_start\n    elif iters >= total_iters - no_aug_iter:\n        lr = min_lr\n    else:\n        lr = min_lr + 0.5 * (lr - min_lr) * (\n            1.0\n            + math.cos(\n                math.pi\n                * (iters - warmup_total_iters)\n                / (total_iters - warmup_total_iters - no_aug_iter)\n            )\n        )\n    return lr\n\n\ndef yolox_semi_warm_cos_lr(\n    lr,\n    min_lr_ratio,\n    warmup_lr_start,\n    total_iters,\n    normal_iters,\n    no_aug_iters,\n    warmup_total_iters,\n    semi_iters,\n    iters_per_epoch,\n    iters_per_epoch_semi,\n    iters,\n):\n    \"\"\"Cosine learning rate with warm up.\"\"\"\n    min_lr = lr * min_lr_ratio\n    if iters <= warmup_total_iters:\n        # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start\n        lr = (lr - warmup_lr_start) * pow(\n            iters / float(warmup_total_iters), 2\n        ) + warmup_lr_start\n    elif iters >= normal_iters + semi_iters:\n        lr = min_lr\n    elif iters <= normal_iters:\n        lr = min_lr + 0.5 * (lr - min_lr) * (\n            1.0\n            + math.cos(\n                math.pi\n                * (iters - warmup_total_iters)\n                / (total_iters - warmup_total_iters - no_aug_iters)\n            )\n        )\n    else:\n        lr = min_lr + 0.5 * (lr - min_lr) * (\n            1.0\n            + math.cos(\n                math.pi\n                * (\n                    normal_iters\n                    - warmup_total_iters\n                    + (iters - normal_iters)\n                    * iters_per_epoch\n                    * 1.0\n                    / iters_per_epoch_semi\n                )\n                / (total_iters - warmup_total_iters - no_aug_iters)\n            )\n        )\n    return lr\n\n\ndef multistep_lr(lr, milestones, gamma, iters):\n    \"\"\"MultiStep learning rate\"\"\"\n    for milestone in milestones:\n        lr *= gamma if iters >= milestone else 1.0\n    return lr\n"
  },
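  {
    "path": "docs/examples/lr_scheduler_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: sampling the\n# 'warmcos' schedule from yolox.utils.lr_scheduler at a few iterations.\nfrom yolox.utils.lr_scheduler import LRScheduler\n\nscheduler = LRScheduler(\n    'warmcos',\n    lr=0.01,\n    iters_per_epoch=100,\n    total_epochs=10,\n    warmup_epochs=1,  # consumed via **kwargs by the 'warmcos' branch\n)\n\n# linear warmup over the first 100 iters, cosine decay afterwards\nfor it in (0, 50, 100, 500, 999):\n    print(it, scheduler.update_lr(it))\n"
  },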
  {
    "path": "yolox/utils/metric.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nimport numpy as np\n\nimport torch\n\nimport functools\nimport os\nimport time\nfrom collections import defaultdict, deque\n\n__all__ = [\n    \"AverageMeter\",\n    \"MeterBuffer\",\n    \"get_total_and_free_memory_in_Mb\",\n    \"occupy_mem\",\n    \"gpu_mem_usage\",\n]\n\n\ndef get_total_and_free_memory_in_Mb(cuda_device):\n    devices_info_str = os.popen(\n        \"nvidia-smi --query-gpu=memory.total,memory.used --format=csv,nounits,noheader\"\n    )\n    devices_info = devices_info_str.read().strip().split(\"\\n\")\n    total, used = devices_info[int(cuda_device)].split(\",\")\n    return int(total), int(used)\n\n\ndef occupy_mem(cuda_device, mem_ratio=0.95):\n    \"\"\"\n    pre-allocate gpu memory for training to avoid memory Fragmentation.\n    \"\"\"\n    total, used = get_total_and_free_memory_in_Mb(cuda_device)\n    max_mem = int(total * mem_ratio)\n    block_mem = max_mem - used\n    x = torch.cuda.FloatTensor(256, 1024, block_mem)\n    del x\n    time.sleep(5)\n\n\ndef gpu_mem_usage():\n    \"\"\"\n    Compute the GPU memory usage for the current device (MB).\n    \"\"\"\n    mem_usage_bytes = torch.cuda.max_memory_allocated()\n    return mem_usage_bytes / (1024 * 1024)\n\n\nclass AverageMeter:\n    \"\"\"Track a series of values and provide access to smoothed values over a\n    window or the global series average.\n    \"\"\"\n\n    def __init__(self, window_size=50):\n        self._deque = deque(maxlen=window_size)\n        self._total = 0.0\n        self._count = 0\n\n    def update(self, value):\n        self._deque.append(value)\n        self._count += 1\n        self._total += value\n\n    @property\n    def median(self):\n        d = np.array(list(self._deque))\n        return np.median(d)\n\n    @property\n    def avg(self):\n        # if deque is empty, nan will be returned.\n        d = np.array(list(self._deque))\n        return d.mean()\n\n    @property\n    def global_avg(self):\n        return self._total / max(self._count, 1e-5)\n\n    @property\n    def latest(self):\n        return self._deque[-1] if len(self._deque) > 0 else None\n\n    @property\n    def total(self):\n        return self._total\n\n    def reset(self):\n        self._deque.clear()\n        self._total = 0.0\n        self._count = 0\n\n    def clear(self):\n        self._deque.clear()\n\n\nclass MeterBuffer(defaultdict):\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self, window_size=20):\n        factory = functools.partial(AverageMeter, window_size=window_size)\n        super().__init__(factory)\n\n    def reset(self):\n        for v in self.values():\n            v.reset()\n\n    def get_filtered_meter(self, filter_key=\"time\"):\n        return {k: v for k, v in self.items() if filter_key in k}\n\n    def update(self, values=None, **kwargs):\n        if values is None:\n            values = {}\n        values.update(kwargs)\n        for k, v in values.items():\n            if isinstance(v, torch.Tensor):\n                v = v.detach()\n            self[k].update(v)\n\n    def clear_meters(self):\n        for v in self.values():\n            v.clear()\n"
  },
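  {
    "path": "docs/examples/metric_example.py",
    "content": "# Hypothetical usage sketch, not part of the original codebase: windowed\n# running averages with yolox.utils.metric.MeterBuffer, as a trainer loop\n# would use them.\nfrom yolox.utils.metric import MeterBuffer\n\nmeters = MeterBuffer(window_size=20)\nfor step in range(100):\n    meters.update(iter_time=0.05, data_time=0.01, total_loss=1.0 / (step + 1))\n\nprint(meters['total_loss'].latest, meters['total_loss'].avg)\n# filter by substring, e.g. every meter whose name contains 'time'\nprint({k: v.avg for k, v in meters.get_filtered_meter('time').items()})\n"
  },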
  {
    "path": "yolox/utils/model_utils.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nimport torch.nn as nn\nfrom thop import profile\n\nfrom copy import deepcopy\n\n__all__ = [\n    \"fuse_conv_and_bn\",\n    \"fuse_model\",\n    \"get_model_info\",\n    \"replace_module\",\n]\n\n\ndef get_model_info(model, tsize):\n\n    stride = 64\n    img = torch.zeros((2, 3, stride, stride), device=next(model.parameters()).device)\n    flops, params = profile(deepcopy(model), inputs=(img.split(1,dim=0),), verbose=False)\n    params /= 1e6\n    flops /= 1e9\n    flops *= tsize[0] * tsize[1] / stride / stride * 2  # Gflops\n    info = \"Params: {:.2f}M, Gflops: {:.2f}\".format(params, flops)\n    return info\n\n\ndef fuse_conv_and_bn(conv, bn):\n    # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n    fusedconv = (\n        nn.Conv2d(\n            conv.in_channels,\n            conv.out_channels,\n            kernel_size=conv.kernel_size,\n            stride=conv.stride,\n            padding=conv.padding,\n            groups=conv.groups,\n            bias=True,\n        )\n        .requires_grad_(False)\n        .to(conv.weight.device)\n    )\n\n    # prepare filters\n    w_conv = conv.weight.clone().view(conv.out_channels, -1)\n    w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n    fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n    # prepare spatial bias\n    b_conv = (\n        torch.zeros(conv.weight.size(0), device=conv.weight.device)\n        if conv.bias is None\n        else conv.bias\n    )\n    b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(\n        torch.sqrt(bn.running_var + bn.eps)\n    )\n    fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n    return fusedconv\n\n\ndef fuse_model(model):\n    from yolox.models.network_blocks import BaseConv\n\n    for m in model.modules():\n        if type(m) is BaseConv and hasattr(m, \"bn\"):\n            m.conv = fuse_conv_and_bn(m.conv, m.bn)  # update conv\n            delattr(m, \"bn\")  # remove batchnorm\n            m.forward = m.fuseforward  # update forward\n    return model\n\n\ndef replace_module(module, replaced_module_type, new_module_type, replace_func=None):\n    \"\"\"\n    Replace given type in module to a new type. mostly used in deploy.\n\n    Args:\n        module (nn.Module): model to apply replace operation.\n        replaced_module_type (Type): module type to be replaced.\n        new_module_type (Type)\n        replace_func (function): python function to describe replace logic. Defalut value None.\n\n    Returns:\n        model (nn.Module): module that already been replaced.\n    \"\"\"\n\n    def default_replace_func(replaced_module_type, new_module_type):\n        return new_module_type()\n\n    if replace_func is None:\n        replace_func = default_replace_func\n\n    model = module\n    if isinstance(module, replaced_module_type):\n        model = replace_func(replaced_module_type, new_module_type)\n    else:  # recurrsively replace\n        for name, child in module.named_children():\n            new_child = replace_module(child, replaced_module_type, new_module_type)\n            if new_child is not child:  # child is already replaced\n                model.add_module(name, new_child)\n\n    return model\n"
  },
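  {
    "path": "docs/examples/replace_module_usage.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Illustrative usage sketch, not part of the original codebase: demonstrates\n# replace_module from yolox/utils/model_utils.py by swapping every nn.SiLU in\n# a toy network for nn.ReLU, a common step before export to inference\n# runtimes. The file path and the toy Net are assumptions for demonstration.\n\nimport torch\nimport torch.nn as nn\n\nfrom yolox.utils.model_utils import replace_module\n\n\nclass Net(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.block = nn.Sequential(\n            nn.Conv2d(3, 8, 3, padding=1),\n            nn.SiLU(),\n            nn.Conv2d(8, 8, 3, padding=1),\n            nn.SiLU(),\n        )\n\n    def forward(self, x):\n        return self.block(x)\n\n\ndef main():\n    model = Net()\n    # The default replace_func instantiates new_module_type with no arguments.\n    model = replace_module(model, nn.SiLU, nn.ReLU)\n    assert not any(isinstance(m, nn.SiLU) for m in model.modules())\n    out = model(torch.randn(1, 3, 32, 32))\n    print(out.shape)  # torch.Size([1, 8, 32, 32])\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },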
  {
    "path": "yolox/utils/setup_env.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport cv2\n\nimport os\nimport subprocess\n\n__all__ = [\"configure_nccl\", \"configure_module\"]\n\n\ndef configure_nccl():\n    \"\"\"Configure multi-machine environment variables of NCCL.\"\"\"\n    os.environ[\"NCCL_LAUNCH_MODE\"] = \"PARALLEL\"\n    os.environ[\"NCCL_IB_HCA\"] = subprocess.getoutput(\n        \"pushd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; \"\n        \"do cat $i/ports/1/gid_attrs/types/* 2>/dev/null \"\n        \"| grep v >/dev/null && echo $i ; done; popd > /dev/null\"\n    )\n    os.environ[\"NCCL_IB_GID_INDEX\"] = \"3\"\n    os.environ[\"NCCL_IB_TC\"] = \"106\"\n\n\ndef configure_module(ulimit_value=8192):\n    \"\"\"\n    Configure pytorch module environment. setting of ulimit and cv2 will be set.\n\n    Args:\n        ulimit_value(int): default open file number on linux. Default value: 8192.\n    \"\"\"\n    # system setting\n    try:\n        import resource\n\n        rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n        resource.setrlimit(resource.RLIMIT_NOFILE, (ulimit_value, rlimit[1]))\n    except Exception:\n        # Exception might be raised in Windows OS or rlimit reaches max limit number.\n        # However, set rlimit value might not be necessary.\n        pass\n\n    # cv2\n    # multiprocess might be harmful on performance of torch dataloader\n    os.environ[\"OPENCV_OPENCL_RUNTIME\"] = \"disabled\"\n    try:\n        cv2.setNumThreads(0)\n        cv2.ocl.setUseOpenCL(False)\n    except Exception:\n        # cv2 version mismatch might rasie exceptions.\n        pass\n"
  },
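  {
    "path": "docs/examples/setup_env_usage.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Illustrative usage sketch, not part of the original codebase: shows where\n# the helpers from yolox/utils/setup_env.py are meant to be called -- once at\n# process start, before any dataloaders are built. The file path is an\n# assumption for demonstration only.\n\nfrom yolox.utils.setup_env import configure_module, configure_nccl\n\n\ndef main():\n    # Raise the open-file soft limit and stop cv2 from spawning its own\n    # thread pool, which can starve torch DataLoader workers.\n    configure_module(ulimit_value=8192)\n\n    # Only needed for multi-machine training over InfiniBand:\n    # configure_nccl()\n\n    # ... build dataloaders and start training here ...\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },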
  {
    "path": "yolox/utils/visualize.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport cv2\nimport numpy as np\n\n__all__ = [\"vis\"]\n\n\ndef vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):\n\n    for i in range(len(boxes)):\n        box = boxes[i]\n        cls_id = int(cls_ids[i])\n        score = scores[i]\n        if score < conf:\n            continue\n        x0 = int(box[0])\n        y0 = int(box[1])\n        x1 = int(box[2])\n        y1 = int(box[3])\n\n        color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()\n        text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)\n        txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)\n        font = cv2.FONT_HERSHEY_SIMPLEX\n\n        txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]\n        cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)\n\n        txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()\n        cv2.rectangle(\n            img,\n            (x0, y0 + 1),\n            (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])),\n            txt_bk_color,\n            -1\n        )\n        cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)\n\n    return img\n\n\ndef get_color(idx):\n    idx = idx * 3\n    color = ((37 * idx) % 255, (17 * idx) % 255, (29 * idx) % 255)\n\n    return color\n\n\ndef plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0., ids2=None):\n    im = np.ascontiguousarray(np.copy(image))\n    im_h, im_w = im.shape[:2]\n\n    top_view = np.zeros([im_w, im_w, 3], dtype=np.uint8) + 255\n\n    #text_scale = max(1, image.shape[1] / 1600.)\n    #text_thickness = 2\n    #line_thickness = max(1, int(image.shape[1] / 500.))\n    text_scale = 2\n    text_thickness = 2\n    line_thickness = 3\n\n    radius = max(5, int(im_w/140.))\n    cv2.putText(im, 'frame: %d fps: %.2f num: %d' % (frame_id, fps, len(tlwhs)),\n                (0, int(15 * text_scale)), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), thickness=2)\n\n    for i, tlwh in enumerate(tlwhs):\n        x1, y1, w, h = tlwh\n        intbox = tuple(map(int, (x1, y1, x1 + w, y1 + h)))\n        obj_id = int(obj_ids[i])\n        id_text = '{}'.format(int(obj_id))\n        if ids2 is not None:\n            id_text = id_text + ', {}'.format(int(ids2[i]))\n        color = get_color(abs(obj_id))\n        cv2.rectangle(im, intbox[0:2], intbox[2:4], color=color, thickness=line_thickness)\n        cv2.putText(im, id_text, (intbox[0], intbox[1]), cv2.FONT_HERSHEY_PLAIN, text_scale, (0, 0, 255),\n                    thickness=text_thickness)\n    return im\n\n\n_COLORS = np.array(\n    [\n        0.000, 0.447, 0.741,\n        0.850, 0.325, 0.098,\n        0.929, 0.694, 0.125,\n        0.494, 0.184, 0.556,\n        0.466, 0.674, 0.188,\n        0.301, 0.745, 0.933,\n        0.635, 0.078, 0.184,\n        0.300, 0.300, 0.300,\n        0.600, 0.600, 0.600,\n        1.000, 0.000, 0.000,\n        1.000, 0.500, 0.000,\n        0.749, 0.749, 0.000,\n        0.000, 1.000, 0.000,\n        0.000, 0.000, 1.000,\n        0.667, 0.000, 1.000,\n        0.333, 0.333, 0.000,\n        0.333, 0.667, 0.000,\n        0.333, 1.000, 0.000,\n        0.667, 0.333, 0.000,\n        0.667, 0.667, 0.000,\n        0.667, 1.000, 0.000,\n        1.000, 0.333, 0.000,\n        1.000, 0.667, 0.000,\n        1.000, 1.000, 0.000,\n        0.000, 0.333, 0.500,\n        0.000, 0.667, 0.500,\n        0.000, 1.000, 0.500,\n        0.333, 0.000, 0.500,\n        
0.333, 0.333, 0.500,\n        0.333, 0.667, 0.500,\n        0.333, 1.000, 0.500,\n        0.667, 0.000, 0.500,\n        0.667, 0.333, 0.500,\n        0.667, 0.667, 0.500,\n        0.667, 1.000, 0.500,\n        1.000, 0.000, 0.500,\n        1.000, 0.333, 0.500,\n        1.000, 0.667, 0.500,\n        1.000, 1.000, 0.500,\n        0.000, 0.333, 1.000,\n        0.000, 0.667, 1.000,\n        0.000, 1.000, 1.000,\n        0.333, 0.000, 1.000,\n        0.333, 0.333, 1.000,\n        0.333, 0.667, 1.000,\n        0.333, 1.000, 1.000,\n        0.667, 0.000, 1.000,\n        0.667, 0.333, 1.000,\n        0.667, 0.667, 1.000,\n        0.667, 1.000, 1.000,\n        1.000, 0.000, 1.000,\n        1.000, 0.333, 1.000,\n        1.000, 0.667, 1.000,\n        0.333, 0.000, 0.000,\n        0.500, 0.000, 0.000,\n        0.667, 0.000, 0.000,\n        0.833, 0.000, 0.000,\n        1.000, 0.000, 0.000,\n        0.000, 0.167, 0.000,\n        0.000, 0.333, 0.000,\n        0.000, 0.500, 0.000,\n        0.000, 0.667, 0.000,\n        0.000, 0.833, 0.000,\n        0.000, 1.000, 0.000,\n        0.000, 0.000, 0.167,\n        0.000, 0.000, 0.333,\n        0.000, 0.000, 0.500,\n        0.000, 0.000, 0.667,\n        0.000, 0.000, 0.833,\n        0.000, 0.000, 1.000,\n        0.000, 0.000, 0.000,\n        0.143, 0.143, 0.143,\n        0.286, 0.286, 0.286,\n        0.429, 0.429, 0.429,\n        0.571, 0.571, 0.571,\n        0.714, 0.714, 0.714,\n        0.857, 0.857, 0.857,\n        0.000, 0.447, 0.741,\n        0.314, 0.717, 0.741,\n        0.50, 0.5, 0\n    ]\n).astype(np.float32).reshape(-1, 3)\n"
  }
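  ,
  {
    "path": "docs/examples/visualize_demo.py",
    "content": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Illustrative usage sketch, not part of the original codebase: draws the\n# detection overlay (vis) and the tracking overlay (plot_tracking) from\n# yolox/utils/visualize.py on a blank canvas. The boxes, ids, and class names\n# below are made-up demo data.\n\nimport numpy as np\n\nfrom yolox.utils.visualize import plot_tracking, vis\n\n\ndef main():\n    img = np.full((480, 640, 3), 114, dtype=np.uint8)  # gray canvas\n\n    # Detections: (x0, y0, x1, y1) corner boxes with scores and class ids.\n    boxes = np.array([[50, 60, 200, 300], [300, 100, 420, 380]], dtype=np.float32)\n    scores = np.array([0.92, 0.41], dtype=np.float32)\n    cls_ids = np.array([0, 0])\n    # The second box falls below conf=0.5 and is skipped.\n    det_img = vis(img.copy(), boxes, scores, cls_ids, conf=0.5, class_names=[\"person\"])\n\n    # Tracks: (top-left x, top-left y, width, height) boxes with track ids.\n    tlwhs = [(50, 60, 150, 240), (300, 100, 120, 280)]\n    track_img = plot_tracking(img, tlwhs, obj_ids=[1, 2], frame_id=7, fps=30.0)\n\n    print(det_img.shape, track_img.shape)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  }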
]